diff --git a/unit_tests/operator/Test_ReduceMeanImpl.cpp b/unit_tests/operator/Test_ReduceMeanImpl.cpp
index 0aed151516fd38251d95c6615696858d7fb17da6..9fecafa2fabba55e8bff236b79cf1033dd14d696 100644
--- a/unit_tests/operator/Test_ReduceMeanImpl.cpp
+++ b/unit_tests/operator/Test_ReduceMeanImpl.cpp
@@ -11,6 +11,8 @@
 #include <catch2/catch_test_macros.hpp>
 #include <memory>
+#include <numeric> // std::accumulate
+#include <random>  // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/ReduceMean.hpp"
 
@@ -22,6 +24,129 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") {
+    SECTION("ForwardDims")
+    {
+        constexpr std::uint16_t NBTRIALS = 10;
+        // Create a random number generator
+        std::random_device rd;
+        std::mt19937 gen(rd());
+        std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0 and 1
+        std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+        std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+        std::uniform_int_distribution<int> boolDist(0,1);
+
+        SECTION("KeepDims") {
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                DimSize_t nbDims = nbDimsDist(gen);
+                std::vector<DimSize_t> dims(nbDims);
+                std::vector<DimSize_t> expectedOutDims(nbDims);
+                std::vector<std::int32_t> axes;
+                for (std::size_t i = 0; i < nbDims; i++) {
+                    dims[i] = dimSizeDist(gen);
+                    expectedOutDims[i] = dims[i];
+                    if(boolDist(gen)) {
+                        axes.push_back(i);
+                        expectedOutDims[i] = 1;
+                    }
+                }
+                if (axes.empty()) { // Default behaviour if no axes are provided is to reduce all dimensions
+                    std::fill(expectedOutDims.begin(), expectedOutDims.end(), 1);
+                }
+
+                std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
+                myInput->setBackend("cpu");
+                myInput->setDataType(DataType::Float32);
+                myInput->zeros();
+                std::shared_ptr<Node> myReduceMean = ReduceMean(axes, true);
+                auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+                op->associateInput(0,myInput);
+                op->setDataType(DataType::Float32);
+                op->setBackend("cpu");
+                op->forwardDims();
+
+                const auto outputDims = op->getOutput(0)->dims();
+                REQUIRE(outputDims == expectedOutDims);
+            }
+        }
+        SECTION("Not KeepDims") {
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                DimSize_t nbDims = nbDimsDist(gen);
+                std::vector<DimSize_t> dims(nbDims);
+                std::vector<DimSize_t> expectedOutDims;
+                std::vector<std::int32_t> axes;
+                for (std::size_t i = 0; i < nbDims; i++) {
+                    dims[i] = dimSizeDist(gen);
+                    if(boolDist(gen)) {
+                        axes.push_back(i);
+                    }
+                    else {
+                        expectedOutDims.push_back(dims[i]);
+                    }
+                }
+                if (axes.empty() || expectedOutDims.empty()) { // Default behaviour if no axes are provided is to reduce all dimensions
+                    expectedOutDims = std::vector<DimSize_t>{1};
+                }
+
+                std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
+                myInput->setBackend("cpu");
+                myInput->setDataType(DataType::Float32);
+                std::shared_ptr<Node> myReduceMean = ReduceMean(axes, false);
+                auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+                op->associateInput(0,myInput);
+                op->setDataType(DataType::Float32);
+                op->setBackend("cpu");
+
+                op->forwardDims();
+
+                const auto outputDims = op->getOutput(0)->dims();
+                REQUIRE(outputDims == expectedOutDims);
+            }
+        }
+        SECTION("NoopWithEmptyAxes") {
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                DimSize_t nbDims = nbDimsDist(gen);
+                std::vector<DimSize_t> dims(nbDims);
+                for (std::size_t i = 0; i < nbDims; i++) {
+                    dims[i] = dimSizeDist(gen);
+                }
+                std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
+                myInput->setBackend("cpu");
+                myInput->setDataType(DataType::Float32);
+                std::shared_ptr<Node> myReduceMean = ReduceMean(std::vector<int32_t>{}, false, true);
+                auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+                op->associateInput(0,myInput);
+                op->setDataType(DataType::Float32);
+                op->setBackend("cpu");
+
+                op->forwardDims();
+
+                const auto outputDims = op->getOutput(0)->dims();
+                REQUIRE(outputDims == dims);
+            }
+        }
+        SECTION("Not NoopWithEmptyAxes") {
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                DimSize_t nbDims = nbDimsDist(gen);
+                std::vector<DimSize_t> dims(nbDims);
+                for (std::size_t i = 0; i < nbDims; i++) {
+                    dims[i] = dimSizeDist(gen);
+                }
+                std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(dims);
+                myInput->setBackend("cpu");
+                myInput->setDataType(DataType::Float32);
+                std::shared_ptr<Node> myReduceMean = ReduceMean({}, false, false);
+                auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+                op->associateInput(0,myInput);
+                op->setDataType(DataType::Float32);
+                op->setBackend("cpu");
+
+                op->forwardDims();
+
+                REQUIRE(op->getOutput(0)->nbDims() == 1);
+                REQUIRE(op->getOutput(0)->size() == 1);
+            }
+        }
+    }
     SECTION("KeepDims") {
         SECTION("test 1") {
             std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {