Commit 124c6d39 authored by Maxence Naud

[Add] Tests for arithmetic operators' computeOutputDims() member function

parent 698d0028
2 merge requests: !105 version 0.2.0, !65 [Add] broadcasting for Arithmetic Operators
Pipeline #39615 failed
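
The tests below exercise computeOutputDims() for the binary arithmetic operators (Div, Mul, Pow, Sub) with NumPy/ONNX-style broadcasting: the two shapes are aligned on their trailing axes, each pair of sizes must be equal or contain a 1, and the output takes the larger size. As a reading aid for the "broadcast" cases, here is a minimal sketch of that rule; broadcastDims is a hypothetical helper written only for illustration, not part of the Aidge API, and the tests themselves only call op->computeOutputDims().

#include <algorithm> // std::max
#include <cstddef>   // std::size_t
#include <stdexcept> // std::runtime_error
#include <vector>

// Hypothetical helper (illustration only): broadcast two shapes under the
// NumPy/ONNX rule assumed by the test cases below.
std::vector<std::size_t> broadcastDims(const std::vector<std::size_t>& a,
                                       const std::vector<std::size_t>& b) {
    const std::size_t outRank = std::max(a.size(), b.size());
    std::vector<std::size_t> out(outRank);
    for (std::size_t i = 0; i < outRank; ++i) {
        // axes missing on the left are treated as size 1
        const std::size_t dA = (i < outRank - a.size()) ? 1 : a[i - (outRank - a.size())];
        const std::size_t dB = (i < outRank - b.size()) ? 1 : b[i - (outRank - b.size())];
        if (dA != dB && dA != 1 && dB != 1) {
            throw std::runtime_error("incompatible dimensions");
        }
        out[i] = std::max(dA, dB);
    }
    return out;
}

For example, broadcastDims({8, 1, 6}, {7, 1}) yields {8, 7, 6}, while broadcastDims({8, 4, 6}, {7, 1}) throws; the "broadcast" loops in the tests construct exactly these two situations at random.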
@@ -26,142 +26,119 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]")
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
// Create Div Operator
std::shared_ptr<Node> myDiv = Div();
auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
/**
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::computeOutputDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0
// T0->resize({});
// // input_1
// T1->resize({});
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// }
// SECTION("0-D / +1-D") {
// SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0
// std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
// op -> associateInput(0,T0);
// T0->resize({});
// // input_1
// std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
// op -> associateInput(1,T1);
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// const std::size_t nb_dims = nbDims(gen);
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dist(gen);
// dims[i] = dimsDist(gen);
// }
// // input_1 - right
// T1->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// // input_1 - wrong
// T1->resize({dims[0] + 1});
// REQUIRE_THROWS(op->computeOutputDims());
// }
// }
// SECTION("+1-D / 0-D") {
// SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1
// std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
// T1->resize({});
// op -> associateInput(1,T1);
// // input_0
// std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
// op -> associateInput(0,T0);
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// const std::size_t nb_dims = nbDims(gen);
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dist(gen);
// dims[i] = dimsDist(gen);
// }
// // input_0 - right
// T0->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// // input_0 - wrong
// T1->resize({dims[0] + 1});
// REQUIRE_THROWS(op->computeOutputDims());
// }
// }
SECTION("+1-D / +1-D") {
// same size
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 1;
}
T0->resize(dims0);
T1->resize(dims0);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims0);
}
// broadcast
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 2;
}
std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
for (std::size_t i = 0; i < nb_dims; ++i) {
if (dimsDist(gen) <= 5) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
T0->resize(dims0);
T1->resize(dims1);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dimsOut);
// input_1 - wrong: increment every entry of dims1 so no axis is 1 or matches dims0
std::vector<std::size_t> dims1_wrong = dims1;
for (std::size_t i = 0; i < dims1.size(); ++i) {
++dims1_wrong[i];
}
T1->resize(dims1_wrong);
REQUIRE(dims0 != dims1_wrong);
REQUIRE_THROWS(op->computeOutputDims());
}
}
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
// Create Mul Operator
std::shared_ptr<Node> myMul = Mul();
auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
/**
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::computeOutputDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0
// T0->resize({});
// // input_1
// T1->resize({});
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// }
// SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0
// T0->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T1->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
// SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1
// T1->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T0->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
SECTION("+1-D / +1-D") {
// same size
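// axis sizes are drawn as dimsDist(gen) + 1, i.e. in [2, 11], so no axis equals 1
// and this first loop involves no broadcasting: the output must equal the common shape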
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 1;
}
T0->resize(dims0);
T1->resize(dims0);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims0);
}
// broadcast
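// dims1 is derived from dims0 by randomly setting some axes to 1 and then erasing
// between 1 and nb_dims-1 leading axes, so input_1 has a lower rank than input_0;
// under the broadcasting rule the output must still be dims0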
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 2;
}
std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
for (std::size_t i = 0; i < nb_dims; ++i) {
if (dimsDist(gen) <= 5) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
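// the std::min(...) above caps the number of erased axes at nb_dims - 1, so dims1 always keeps at least one axis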
T0->resize(dims0);
T1->resize(dims1);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dimsOut);
// input_1 - wrong: increment every entry of dims1 so no axis is 1 or matches dims0
std::vector<std::size_t> dims1_wrong = dims1;
for (std::size_t i = 0; i < dims1.size(); ++i) {
++dims1_wrong[i];
}
T1->resize(dims1_wrong);
REQUIRE(dims0 != dims1_wrong);
REQUIRE_THROWS(op->computeOutputDims());
}
}
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Pow.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
// Create Pow Operator
std::shared_ptr<Node> myPow = Pow();
auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
/**
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::computeOutputDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0
// T0->resize({});
// // input_1
// T1->resize({});
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// }
// SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0
// T0->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T1->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
// SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1
// T1->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T0->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
SECTION("+1-D / +1-D") {
// same size
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 1;
}
T0->resize(dims0);
T1->resize(dims0);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims0);
}
// broadcast
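// same construction as in the Mul test above: some axes of dims1 set to 1 and leading axes erased; the expected output is dims0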
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 2;
}
std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
for (std::size_t i = 0; i < nb_dims; ++i) {
if (dimsDist(gen) <= 5) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
T0->resize(dims0);
T1->resize(dims1);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dimsOut);
// input_1 - wrong: increment every entry of dims1 so no axis is 1 or matches dims0
std::vector<std::size_t> dims1_wrong = dims1;
for (std::size_t i = 0; i < dims1.size(); ++i) {
++dims1_wrong[i];
}
T1->resize(dims1_wrong);
REQUIRE(dims0 != dims1_wrong);
REQUIRE_THROWS(op->computeOutputDims());
}
}
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef> // std::size_t
#include <memory>
#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Sub.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace Aidge {
TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
// Create Sub Operator
std::shared_ptr<Node> mySub = Sub();
auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
// input_0
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op -> associateInput(0,T0);
// input_1
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op -> associateInput(1,T1);
/**
* @todo Special case: scalar not handled yet by
* ``OperatorTensor::computeOutputDims()``
*/
// SECTION("Scalar / Scalar") {
// // input_0
// T0->resize({});
// // input_1
// T1->resize({});
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
// }
// SECTION("Scalar / +1-D") {
// // a scalar is compatible with any other Tensor
// // input_0
// T0->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_1
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T1->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
// SECTION("+1-D / Scalar") {
// // a scalar is compatible with any other Tensor
// // input_1
// T1->resize({});
// for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// // input_0
// const std::size_t nb_dims = nbDimsDist(gen);
// std::vector<std::size_t> dims(nb_dims);
// for (std::size_t i = 0; i < nb_dims; ++i) {
// dims[i] = dimsDist(gen);
// }
// T0->resize(dims);
// REQUIRE_NOTHROW(op->computeOutputDims());
// REQUIRE((op->getOutput(0)->dims()) == dims);
// }
// }
SECTION("+1-D / +1-D") {
// same size
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 1;
}
T0->resize(dims0);
T1->resize(dims0);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dims0);
}
// broadcast
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = nbDimsDist(gen) + 1;
std::vector<std::size_t> dims0(nb_dims);
for (std::size_t i = 0; i < nb_dims; ++i) {
dims0[i] = dimsDist(gen) + 2;
}
std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
for (std::size_t i = 0; i < nb_dims; ++i) {
if (dimsDist(gen) <= 5) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
T0->resize(dims0);
T1->resize(dims1);
REQUIRE_NOTHROW(op->computeOutputDims());
REQUIRE((op->getOutput(0)->dims()) == dimsOut);
// input_1 - wrong: increment every entry of dims1 so no axis is 1 or matches dims0
std::vector<std::size_t> dims1_wrong = dims1;
for (std::size_t i = 0; i < dims1.size(); ++i) {
++dims1_wrong[i];
}
T1->resize(dims1_wrong);
REQUIRE(dims0 != dims1_wrong);
REQUIRE_THROWS(op->computeOutputDims());
}
}
}
} // namespace Aidge