Skip to content
Snippets Groups Projects
Commit 124c6d39 authored by Maxence Naud's avatar Maxence Naud
Browse files

[Add] Tests for arithmetic operators' computeOutputDims() member function

parent 698d0028
No related branches found
No related tags found
2 merge requests!105version 0.2.0,!65[Add] broadcasting for Arithmetic Operators
Pipeline #39615 failed
This commit is part of merge request !65. Comments created here will be created in the context of that merge request.
...@@ -26,142 +26,119 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims] ...@@ -26,142 +26,119 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
// Create a random number generator // Create a random number generator
std::random_device rd; std::random_device rd;
std::mt19937 gen(rd()); std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dist(1, 10); std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDims(1, 5); std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
// Create Div Operator // Create Div Operator
std::shared_ptr<Node> myDiv = Div(); std::shared_ptr<Node> myDiv = Div();
auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator()); auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
/** @todo Special case of scalar Tensor objects. // input_0
* Not handled yet. std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
*/ op -> associateInput(0,T0);
// SECTION("0-D / 0-D") { // input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
/**
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::computeOutputDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0 // // input_0
// std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
// T0->resize({}); // T0->resize({});
// op -> associateInput(0,T0);
// // input_1 - right // // input_1
// std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
// T1->resize({}); // T1->resize({});
// op -> associateInput(1,T1);
// REQUIRE_NOTHROW(op->computeOutputDims()); // REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()).empty()); // REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// // input_1 - wrong
// T1->resize({dist(gen)});
// REQUIRE_THROWS(op->computeOutputDims());
// } // }
// SECTION("0-D / +1-D") { // SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0 // // input_0
// std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
// op -> associateInput(0,T0);
// T0->resize({}); // T0->resize({});
// // input_1
// std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
// op -> associateInput(1,T1);
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { // for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// const std::size_t nb_dims = nbDims(gen);
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims); // std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) { // for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dist(gen); // dims[i] = dimsDist(gen);
// } // }
// // input_1 - right
// T1->resize(dims); // T1->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims()); // REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims); // REQUIRE((op->getOutput(0)->dims()) == dims);
// // input_1 - wrong
// T1->resize({dims[0] + 1});
// REQUIRE_THROWS(op->computeOutputDims());
// } // }
// } // }
// SECTION("+1-D / 0-D") { // SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1 // // input_1
// std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
// T1->resize({}); // T1->resize({});
// op -> associateInput(1,T1);
// // input_0
// std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
// op -> associateInput(0,T0);
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { // for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// const std::size_t nb_dims = nbDims(gen);
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims); // std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) { // for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dist(gen); // dims[i] = dimsDist(gen);
// } // }
// // input_0 - right
// T0->resize(dims); // T0->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims()); // REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims); // REQUIRE((op->getOutput(0)->dims()) == dims);
// // input_0 - wrong
// T1->resize({dims[0] + 1});
// REQUIRE_THROWS(op->computeOutputDims());
// } // }
// } // }
SECTION("+1-D / +1-D") { SECTION("+1-D / +1-D") {
// input_0 // same size
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDims(gen) + 1; const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims(nb_dims); std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) { for (std::size_t i = 0; i < nb_dims; ++i) {
dims[i] = dist(gen); dims0[i] = dimsDist(gen) + 1;
} }
T0->resize(dims);
// input_1 - right - same dims T0->resize(dims0);
T1->resize(dims); T1->resize(dims0);
REQUIRE_NOTHROW(op->computeOutputDims()); REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims); REQUIRE((op->getOutput(0)->dims()) == dims0);
}
// input_1 - right - broadcast // broadcast
std::vector<std::size_t> dims_for_broadcast(nb_dims); for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) { for (std::size_t i = 0; i < nb_dims; ++i) {
dims_for_broadcast[i] = ((i % 2) == 0) ? dims[i] : 1; dims0[i] = dimsDist(gen) + 2;
} }
T1->resize(dims); std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
REQUIRE_NOTHROW(op->computeOutputDims()); for (std::size_t i = 0; i < nb_dims; ++i) {
REQUIRE((op->getOutput(0)->dims()) == dims); if (dimsDist(gen) <= 5) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
// input_1 - right - less dimensions T0->resize(dims0);
const std::vector<std::size_t> dims_less_dimensions(dims.cbegin() + nbDims(gen), dims.cend()); T1->resize(dims1);
T1->resize(dims_less_dimensions);
REQUIRE_NOTHROW(op->computeOutputDims()); REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims); REQUIRE((op->getOutput(0)->dims()) == dimsOut);
// input_0 - wrong // input_0 - wrong
// T1->resize({dims[0] + 1}); // T1->resize({dims[0] + 1});
std::vector<std::size_t> dims_wrong = dims; std::vector<std::size_t> dims1_wrong = dims1;
for (std::size_t i = 0; i< nb_dims; ++i) { for (std::size_t i = 0; i < dims1.size(); ++i) {
++dims_wrong[i]; ++dims1_wrong[i];
} }
T1->resize(dims); T1->resize(dims1_wrong);
if (dims != std::vector<size_t>(nb_dims, 1)) REQUIRE(dims0 != dims1_wrong);
REQUIRE_THROWS(op->computeOutputDims()); REQUIRE_THROWS(op->computeOutputDims());
} }
} }
} }
} // namespace Aidge } // namespace Aidge
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") {
    constexpr std::uint16_t NBTRIALS = 10;

    // Random generators: dimsDist draws a single dimension size,
    // nbDimsDist draws a tensor rank.
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);

    // Build the Mul node and access its tensor-level operator.
    std::shared_ptr<Node> myMul = Mul();
    auto op = std::static_pointer_cast<OperatorTensor>(myMul->getOperator());

    // input_0
    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
    op->associateInput(0, T0);
    // input_1
    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
    op->associateInput(1, T1);

    /**
     * @todo Special case: scalar not handled yet by
     * ``OperatorTensor::computeOutputDims()``
     */
    // SECTION("Scalar / Scalar") {
    //     // input_0
    //     T0->resize({});
    //     // input_1
    //     T1->resize({});
    //     REQUIRE_NOTHROW(op->computeOutputDims());
    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
    // }
    // SECTION("Scalar / +1-D") {
    //     // a scalar is compatible with any other Tensor
    //     // input_0
    //     T0->resize({});
    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
    //         // input_1
    //         const std::size_t nb_dims = nbDimsDist(gen);
    //         std::vector<std::size_t> dims(nb_dims);
    //         for (std::size_t i = 0; i < nb_dims; ++i) {
    //             dims[i] = dimsDist(gen);
    //         }
    //         T1->resize(dims);
    //         REQUIRE_NOTHROW(op->computeOutputDims());
    //         REQUIRE((op->getOutput(0)->dims()) == dims);
    //     }
    // }
    // SECTION("+1-D / Scalar") {
    //     // a scalar is compatible with any other Tensor
    //     // input_1
    //     T1->resize({});
    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
    //         // input_0
    //         const std::size_t nb_dims = nbDimsDist(gen);
    //         std::vector<std::size_t> dims(nb_dims);
    //         for (std::size_t i = 0; i < nb_dims; ++i) {
    //             dims[i] = dimsDist(gen);
    //         }
    //         T0->resize(dims);
    //         REQUIRE_NOTHROW(op->computeOutputDims());
    //         REQUIRE((op->getOutput(0)->dims()) == dims);
    //     }
    // }
    SECTION("+1-D / +1-D") {
        // Identical shapes on both inputs: the output must match them exactly.
        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
            const std::size_t rank = nbDimsDist(gen) + 1;
            std::vector<std::size_t> shape(rank);
            for (auto& dim : shape) {
                dim = dimsDist(gen) + 1;
            }
            T0->resize(shape);
            T1->resize(shape);
            REQUIRE_NOTHROW(op->computeOutputDims());
            REQUIRE((op->getOutput(0)->dims()) == shape);
        }
        // Broadcasting: input_1 may have axes collapsed to 1 and/or fewer
        // leading axes; the output keeps input_0's shape.
        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
            const std::size_t rank = nbDimsDist(gen) + 1;
            std::vector<std::size_t> shape0(rank);
            for (auto& dim : shape0) {
                dim = dimsDist(gen) + 2;
            }
            std::vector<std::size_t> expectedOut = shape0;
            std::vector<std::size_t> shape1 = shape0;
            // Collapse each axis of input_1 to 1 with ~50% probability.
            for (auto& dim : shape1) {
                if (dimsDist(gen) <= 5) {
                    dim = 1;
                }
            }
            // Drop some leading axes of input_1 (still broadcast-compatible).
            shape1.erase(shape1.cbegin(), shape1.cbegin() + std::min(nbDimsDist(gen), rank - 1));

            T0->resize(shape0);
            T1->resize(shape1);
            REQUIRE_NOTHROW(op->computeOutputDims());
            REQUIRE((op->getOutput(0)->dims()) == expectedOut);

            // Incompatible input_1: shift every remaining axis by one so it
            // neither matches input_0's axis nor equals 1.
            std::vector<std::size_t> badShape1 = shape1;
            for (auto& dim : badShape1) {
                ++dim;
            }
            T1->resize(badShape1);
            REQUIRE(shape0 != badShape1);
            REQUIRE_THROWS(op->computeOutputDims());
        }
    }
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Pow.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") {
    constexpr std::uint16_t NBTRIALS = 10;

    // Random generators: dimsDist draws a single dimension size,
    // nbDimsDist draws a tensor rank.
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);

    // Build the Pow node and access its tensor-level operator.
    std::shared_ptr<Node> myPow = Pow();
    auto op = std::static_pointer_cast<OperatorTensor>(myPow->getOperator());

    // input_0
    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
    op->associateInput(0, T0);
    // input_1
    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
    op->associateInput(1, T1);

    /**
     * @todo Special case: scalar not handled yet by
     * ``OperatorTensor::computeOutputDims()``
     */
    // SECTION("Scalar / Scalar") {
    //     // input_0
    //     T0->resize({});
    //     // input_1
    //     T1->resize({});
    //     REQUIRE_NOTHROW(op->computeOutputDims());
    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
    // }
    // SECTION("Scalar / +1-D") {
    //     // a scalar is compatible with any other Tensor
    //     // input_0
    //     T0->resize({});
    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
    //         // input_1
    //         const std::size_t nb_dims = nbDimsDist(gen);
    //         std::vector<std::size_t> dims(nb_dims);
    //         for (std::size_t i = 0; i < nb_dims; ++i) {
    //             dims[i] = dimsDist(gen);
    //         }
    //         T1->resize(dims);
    //         REQUIRE_NOTHROW(op->computeOutputDims());
    //         REQUIRE((op->getOutput(0)->dims()) == dims);
    //     }
    // }
    // SECTION("+1-D / Scalar") {
    //     // a scalar is compatible with any other Tensor
    //     // input_1
    //     T1->resize({});
    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
    //         // input_0
    //         const std::size_t nb_dims = nbDimsDist(gen);
    //         std::vector<std::size_t> dims(nb_dims);
    //         for (std::size_t i = 0; i < nb_dims; ++i) {
    //             dims[i] = dimsDist(gen);
    //         }
    //         T0->resize(dims);
    //         REQUIRE_NOTHROW(op->computeOutputDims());
    //         REQUIRE((op->getOutput(0)->dims()) == dims);
    //     }
    // }
    SECTION("+1-D / +1-D") {
        // Identical shapes on both inputs: the output must match them exactly.
        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
            const std::size_t rank = nbDimsDist(gen) + 1;
            std::vector<std::size_t> shape(rank);
            for (auto& dim : shape) {
                dim = dimsDist(gen) + 1;
            }
            T0->resize(shape);
            T1->resize(shape);
            REQUIRE_NOTHROW(op->computeOutputDims());
            REQUIRE((op->getOutput(0)->dims()) == shape);
        }
        // Broadcasting: input_1 may have axes collapsed to 1 and/or fewer
        // leading axes; the output keeps input_0's shape.
        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
            const std::size_t rank = nbDimsDist(gen) + 1;
            std::vector<std::size_t> shape0(rank);
            for (auto& dim : shape0) {
                dim = dimsDist(gen) + 2;
            }
            std::vector<std::size_t> expectedOut = shape0;
            std::vector<std::size_t> shape1 = shape0;
            // Collapse each axis of input_1 to 1 with ~50% probability.
            for (auto& dim : shape1) {
                if (dimsDist(gen) <= 5) {
                    dim = 1;
                }
            }
            // Drop some leading axes of input_1 (still broadcast-compatible).
            shape1.erase(shape1.cbegin(), shape1.cbegin() + std::min(nbDimsDist(gen), rank - 1));

            T0->resize(shape0);
            T1->resize(shape1);
            REQUIRE_NOTHROW(op->computeOutputDims());
            REQUIRE((op->getOutput(0)->dims()) == expectedOut);

            // Incompatible input_1: shift every remaining axis by one so it
            // neither matches input_0's axis nor equals 1.
            std::vector<std::size_t> badShape1 = shape1;
            for (auto& dim : badShape1) {
                ++dim;
            }
            T1->resize(badShape1);
            REQUIRE(shape0 != badShape1);
            REQUIRE_THROWS(op->computeOutputDims());
        }
    }
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Sub.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") {
    constexpr std::uint16_t NBTRIALS = 10;

    // Random generators: dimsDist draws a single dimension size,
    // nbDimsDist draws a tensor rank.
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);

    // Build the Sub node and access its tensor-level operator.
    std::shared_ptr<Node> mySub = Sub();
    auto op = std::static_pointer_cast<OperatorTensor>(mySub->getOperator());

    // input_0
    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
    op->associateInput(0, T0);
    // input_1
    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
    op->associateInput(1, T1);

    /**
     * @todo Special case: scalar not handled yet by
     * ``OperatorTensor::computeOutputDims()``
     */
    // SECTION("Scalar / Scalar") {
    //     // input_0
    //     T0->resize({});
    //     // input_1
    //     T1->resize({});
    //     REQUIRE_NOTHROW(op->computeOutputDims());
    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
    // }
    // SECTION("Scalar / +1-D") {
    //     // a scalar is compatible with any other Tensor
    //     // input_0
    //     T0->resize({});
    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
    //         // input_1
    //         const std::size_t nb_dims = nbDimsDist(gen);
    //         std::vector<std::size_t> dims(nb_dims);
    //         for (std::size_t i = 0; i < nb_dims; ++i) {
    //             dims[i] = dimsDist(gen);
    //         }
    //         T1->resize(dims);
    //         REQUIRE_NOTHROW(op->computeOutputDims());
    //         REQUIRE((op->getOutput(0)->dims()) == dims);
    //     }
    // }
    // SECTION("+1-D / Scalar") {
    //     // a scalar is compatible with any other Tensor
    //     // input_1
    //     T1->resize({});
    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
    //         // input_0
    //         const std::size_t nb_dims = nbDimsDist(gen);
    //         std::vector<std::size_t> dims(nb_dims);
    //         for (std::size_t i = 0; i < nb_dims; ++i) {
    //             dims[i] = dimsDist(gen);
    //         }
    //         T0->resize(dims);
    //         REQUIRE_NOTHROW(op->computeOutputDims());
    //         REQUIRE((op->getOutput(0)->dims()) == dims);
    //     }
    // }
    SECTION("+1-D / +1-D") {
        // Identical shapes on both inputs: the output must match them exactly.
        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
            const std::size_t rank = nbDimsDist(gen) + 1;
            std::vector<std::size_t> shape(rank);
            for (auto& dim : shape) {
                dim = dimsDist(gen) + 1;
            }
            T0->resize(shape);
            T1->resize(shape);
            REQUIRE_NOTHROW(op->computeOutputDims());
            REQUIRE((op->getOutput(0)->dims()) == shape);
        }
        // Broadcasting: input_1 may have axes collapsed to 1 and/or fewer
        // leading axes; the output keeps input_0's shape.
        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
            const std::size_t rank = nbDimsDist(gen) + 1;
            std::vector<std::size_t> shape0(rank);
            for (auto& dim : shape0) {
                dim = dimsDist(gen) + 2;
            }
            std::vector<std::size_t> expectedOut = shape0;
            std::vector<std::size_t> shape1 = shape0;
            // Collapse each axis of input_1 to 1 with ~50% probability.
            for (auto& dim : shape1) {
                if (dimsDist(gen) <= 5) {
                    dim = 1;
                }
            }
            // Drop some leading axes of input_1 (still broadcast-compatible).
            shape1.erase(shape1.cbegin(), shape1.cbegin() + std::min(nbDimsDist(gen), rank - 1));

            T0->resize(shape0);
            T1->resize(shape1);
            REQUIRE_NOTHROW(op->computeOutputDims());
            REQUIRE((op->getOutput(0)->dims()) == expectedOut);

            // Incompatible input_1: shift every remaining axis by one so it
            // neither matches input_0's axis nor equals 1.
            std::vector<std::size_t> badShape1 = shape1;
            for (auto& dim : badShape1) {
                ++dim;
            }
            T1->resize(badShape1);
            REQUIRE(shape0 != badShape1);
            REQUIRE_THROWS(op->computeOutputDims());
        }
    }
}
} // namespace Aidge
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment