diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp index fd9bdaa8326a3460ce1e986fb64a0a1087786a7a..a69a77fc6986ca76d7657fda59dae4007bcd90ff 100644 --- a/src/operator/Add.cpp +++ b/src/operator/Add.cpp @@ -9,8 +9,7 @@ * ********************************************************************************/ -#include <cassert> -#include <cstddef> +#include <cstddef> // std::size_t #include <string> #include <vector> @@ -30,39 +29,29 @@ void Aidge::Add_Op::computeOutputDims() { associated &= !(getInput(i)->empty()); } if (associated) { - std::vector<std::vector<std::size_t>> inputsDims; - for (std::size_t i = 0; i < nbInputs(); i++) - { - inputsDims.push_back(getInput(i)->dims()); + std::vector<std::vector<std::size_t>> inputsDims(nbInputs()); + for (std::size_t i = 0; i < nbInputs(); i++) { + inputsDims[i] = getInput(i)->dims(); } std::size_t outNbDims = 1; - - for(size_t i=0; i<inputsDims.size() ; ++i) - outNbDims = inputsDims[i].size()>outNbDims?inputsDims[i].size():outNbDims; + for(std::size_t i = 0; i < nbInputs(); ++i) { + outNbDims = (inputsDims[i].size() > outNbDims) ? 
inputsDims[i].size() : outNbDims; + } std::vector<std::size_t> outDims(outNbDims, 1); - std::vector<std::size_t>::iterator it = outDims.end(); - while (it != outDims.begin()) - { - --it; - for (size_t i = 0; i < inputsDims.size(); i++) - { - if(!inputsDims[i].empty()) - { + for (auto it = outDims.rbegin(); it != outDims.rend(); ++it) { + for (size_t i = 0; i < inputsDims.size(); i++) { + if(!inputsDims[i].empty()) { std::size_t dim = inputsDims[i].back(); inputsDims[i].pop_back(); - if (*it != dim) - { - if(dim != 1) - { - if (*it != 1) - { + if (*it != dim) { + if(dim != 1) { + if (*it != 1) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsopported Tensor shape for Add operation"); } - else - { + else { *it = dim; } } diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9e6953716fa87bc98e6d0012a031c7a9dc45da32 --- /dev/null +++ b/unit_tests/operator/Test_Div_Op.cpp @@ -0,0 +1,167 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <catch2/catch_test_macros.hpp> +#include <cstddef> // std::size_t +#include <memory> +#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution +#include <vector> + +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/Div.hpp" +#include "aidge/operator/OperatorTensor.hpp" + +namespace Aidge { +TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]") { + constexpr std::uint16_t NBTRIALS = 10; + + // Create a random number generator + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<std::size_t> dist(1, 10); + std::uniform_int_distribution<std::size_t> nbDims(1, 5); + + // Create Div Operator + std::shared_ptr<Node> myDiv = Div(); + auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator()); + + /** @todo Special case of scalar Tensor objects. + * Not handled yet. 
+ */ + // SECTION("0-D / 0-D") { + // // input_0 + // std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + // T0->resize({}); + // op -> associateInput(0,T0); + + // // input_1 - right + // std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + // T1->resize({}); + // op -> associateInput(1,T1); + + // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE((op->getOutput(0)->dims()).empty()); + + // // input_1 - wrong + // T1->resize({dist(gen)}); + + // REQUIRE_THROWS(op->computeOutputDims()); + // } + // SECTION("0-D / +1-D") { + // // input_0 + // std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + // op -> associateInput(0,T0); + // T0->resize({}); + + // // input_1 + // std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + // op -> associateInput(1,T1); + + // for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // const std::size_t nb_dims = nbDims(gen); + // std::vector<std::size_t> dims(nb_dims); + // for (std::size_t i = 0; i < nb_dims; ++i) { + // dims[i] = dist(gen); + // } + + // // input_1 - right + // T1->resize(dims); + + // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE((op->getOutput(0)->dims()) == dims); + + // // input_1 - wrong + // T1->resize({dims[0] + 1}); + + // REQUIRE_THROWS(op->computeOutputDims()); + // } + // } + // SECTION("+1-D / 0-D") { + // // input_1 + // std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + // T1->resize({}); + // op -> associateInput(1,T1); + + // // input_0 + // std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + // op -> associateInput(0,T0); + + // for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + // const std::size_t nb_dims = nbDims(gen); + // std::vector<std::size_t> dims(nb_dims); + // for (std::size_t i = 0; i < nb_dims; ++i) { + // dims[i] = dist(gen); + // } + + // // input_0 - right + // T0->resize(dims); + + // REQUIRE_NOTHROW(op->computeOutputDims()); + // REQUIRE((op->getOutput(0)->dims()) == dims); + + // // input_0 - wrong + // 
             T1->resize({dims[0] + 1}); + + // REQUIRE_THROWS(op->computeOutputDims()); + // } + // } + SECTION("+1-D / +1-D") { + // input_0 + std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(); + op -> associateInput(0,T0); + // input_1 + std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(); + op -> associateInput(1,T1); + + + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { + const std::size_t nb_dims = nbDims(gen) + 1; + std::vector<std::size_t> dims(nb_dims); + for (std::size_t i = 0; i < nb_dims; ++i) { + dims[i] = dist(gen); + } + T0->resize(dims); + + // input_1 - right - same dims + T1->resize(dims); + REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE((op->getOutput(0)->dims()) == dims); + + // input_1 - right - broadcast + std::vector<std::size_t> dims_for_broadcast(nb_dims); + for (std::size_t i = 0; i < nb_dims; ++i) { + dims_for_broadcast[i] = ((i % 2) == 0) ? dims[i] : 1; + } + T1->resize(dims_for_broadcast); + + REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE((op->getOutput(0)->dims()) == dims); + + // input_1 - right - less dimensions + const std::vector<std::size_t> dims_less_dimensions(dims.cbegin() + (nbDims(gen) % nb_dims), dims.cend()); + T1->resize(dims_less_dimensions); + + REQUIRE_NOTHROW(op->computeOutputDims()); + REQUIRE((op->getOutput(0)->dims()) == dims); + + // input_0 - wrong + // T1->resize({dims[0] + 1}); + std::vector<std::size_t> dims_wrong = dims; + for (std::size_t i = 0; i< nb_dims; ++i) { + ++dims_wrong[i]; + } + T1->resize(dims_wrong); + if (dims != std::vector<size_t>(nb_dims, 1)) + REQUIRE_THROWS(op->computeOutputDims()); + } + } +} +} // namespace Aidge \ No newline at end of file