Commit 845af882 authored by Christophe Guillon, committed by Maxence Naud

[Tests] Update operator tests forwardDims/forward for scalars

Activate the previously commented-out scalar tests for the
elementwise operators: Test_[Div|Mul|Sub|Pow]_Op.cpp.
Add a scalar test to Test_MatMul_Op.cpp.
Add negative scalar tests to Test_[Concat|Transpose]Impl.cpp.
Update the forward/forwardDims implementations of the
Concat|Transpose|MatMul operators to assert on unexpected scalar inputs.
parent cb0748a7
2 merge requests: !212 Version 0.3.0, !162 [Tensor] Disambiguate undefined Tensor dimensions from Scalar Tensor
Pipeline #50492 passed
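
To see the new behaviour outside of the Catch2 tests, the negative Concat case added below can be replayed as a few lines of standalone code. This is only a minimal sketch: the include paths and the assumption that AIDGE_ASSERT surfaces as a C++ exception (as the REQUIRE_THROWS checks below imply) are illustrative, not part of this diff.

// Minimal sketch (assumed include paths): feeding scalar (0-D) Tensors to
// Concat is now rejected by an AIDGE_ASSERT instead of failing silently.
#include <iostream>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Concat.hpp"

int main() {
    using namespace Aidge;

    // Two scalar Tensors, mirroring the "Concat scalar inputs" test below.
    std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(2);
    std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(4);

    auto myConcat = Concat(2, 0);   // 2 inputs, concatenation axis 0
    myConcat->getOperator()->associateInput(0, input1);
    myConcat->getOperator()->associateInput(1, input2);

    try {
        myConcat->forward();        // scalar inputs are now rejected
    } catch (const std::exception& e) {  // assumed: AIDGE_ASSERT throws a std::exception subclass
        std::cerr << "rejected scalar inputs: " << e.what() << '\n';
    }
    return 0;
}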
@@ -64,14 +64,8 @@ bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
         return false;
     }
     const std::size_t nbDimsInput0 = getInput(0)->nbDims();
-    if (nbDimsInput0 == 0) {
-        return false;
-    }
-    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is empty", type());
+    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is scalar", type());
     for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-        if (getInput(i)->nbDims() == 0) {
-            return false;
-        }
         AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
             "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
             i, type(), nbDimsInput0, getInput(i)->nbDims());
@@ -69,7 +69,10 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
             mOutputs[0]->resize(outDims);
             return true;
+        } else {
+            AIDGE_ASSERT(false, "Incompatible scalar and N-D sizes.");
         }
     }
     return false;
@@ -32,6 +32,7 @@ const std::string Aidge::Transpose_Op::Type = "Transpose";
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
+        AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars.");
         std::vector<DimSize_t> outputDims;
         for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
             outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
@@ -18,6 +18,14 @@
 using namespace Aidge;
 TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
+    SECTION("Concat scalar inputs") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(2);
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(4);
+        auto myConcat = Concat(2, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        REQUIRE_THROWS(myConcat->forward());
+    }
     SECTION("Concat 1D inputs") {
         std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
         std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
@@ -140,4 +148,4 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
         REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
     }
-}
\ No newline at end of file
+}
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-    //     // input_1
-    //     T1->resize({});
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+        // input_1
+        T1->resize({});
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
@@ -33,24 +33,24 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     /** @todo Special case of scalar Tensor objects.
     * Not handled yet.
     */
-    // SECTION("0-D / 0-D") {
-    //     std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
-    //     T0->resize({});
-    //     op -> associateInput(0,T0);
-    //     // input_1 - right
-    //     std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
-    //     T1->resize({});
-    //     op -> associateInput(1,T1);
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims()).empty());
-    //     // input_1 - wrong
-    //     T1->resize({dist(gen)});
-    //     REQUIRE_THROWS(op->forwardDims());
-    // }
+    SECTION("0-D / 0-D") {
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        T0->resize({});
+        op -> associateInput(0,T0);
+        // input_1 - right
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        T1->resize({});
+        op -> associateInput(1,T1);
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims()).empty());
+        // input_1 - wrong
+        T1->resize({dist(gen)});
+        REQUIRE_THROWS(op->forwardDims());
+    }
     SECTION("1-D / N-D") {
         // input_0
@@ -193,4 +193,4 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
         REQUIRE_THROWS(op -> forwardDims());
     }
     }
-} // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-    //     // input_1
-    //     T1->resize({});
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+        // input_1
+        T1->resize({});
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-    //     // input_1
-    //     T1->resize({});
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+        // input_1
+        T1->resize({});
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-    //     // input_1
-    //     T1->resize({});
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+        // input_1
+        T1->resize({});
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
@@ -18,6 +18,16 @@
 using namespace Aidge;
 TEST_CASE("[cpu/operator] Transpose(forward)") {
+    SECTION("Scalar Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(2);
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(2);
+        std::shared_ptr<Node> myTranspose = Transpose({});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        REQUIRE_THROWS(myTranspose->forward());
+    }
     SECTION("3D Tensor") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
             {
@@ -120,4 +130,4 @@ TEST_CASE("[cpu/operator] Transpose(forward)") {
         REQUIRE(*(op->getOutput(0)) == *output);
     }
-}
\ No newline at end of file
+}