From a28eb541a80b45c7c4baf92c480f9d707b58fdd9 Mon Sep 17 00:00:00 2001 From: hrouis <houssemeddine.rouis92@gmail.com> Date: Fri, 9 Aug 2024 14:51:18 +0200 Subject: [PATCH] add noop_with_empty_axes attr --- .../ReduceMeanImpl_forward_kernels.hpp | 5 ++- .../ReduceSumImpl_forward_kernels.hpp | 5 ++- unit_tests/operator/Test_ReduceMeanImpl.cpp | 32 +++++++++++++++++-- unit_tests/operator/Test_ReduceSumImpl.cpp | 30 ++++++++++++++++- 4 files changed, 67 insertions(+), 5 deletions(-) diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp index bba355e1..fb14893f 100644 --- a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp @@ -38,7 +38,10 @@ void ReduceMeanImpl_cpu_forward_kernel(const std::vector<std::int32_t>& axes, const std::size_t nb_dims = inputDims.size(); const std::size_t totalElements = std::accumulate(inputDims.cbegin(), inputDims.cend(), 1, std::multiplies<std::size_t>()); - if (axes.size() == 1) { + if (axes.empty()){ + std::copy_n(input,totalElements, output); + } + else if (axes.size() == 1) { const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + axes[0], 1, std::multiplies<std::size_t>()); const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - axes[0], 1, std::multiplies<std::size_t>()); diff --git a/include/aidge/backend/cpu/operator/ReduceSumImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReduceSumImpl_forward_kernels.hpp index 9e6c056a..f215065a 100644 --- a/include/aidge/backend/cpu/operator/ReduceSumImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ReduceSumImpl_forward_kernels.hpp @@ -38,7 +38,10 @@ void ReduceSumImpl_cpu_forward_kernel(const std::vector<std::int32_t>& axes, const std::size_t nb_dims = inputDims.size(); const std::size_t 
totalElements = std::accumulate(inputDims.cbegin(), inputDims.cend(), 1, std::multiplies<std::size_t>()); - if (axes.size() == 1) { + if (axes.empty()){ + std::copy_n(input,totalElements, output); + } + else if (axes.size() == 1) { const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + axes[0], 1, std::multiplies<std::size_t>()); const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - axes[0], 1, std::multiplies<std::size_t>()); diff --git a/unit_tests/operator/Test_ReduceMeanImpl.cpp b/unit_tests/operator/Test_ReduceMeanImpl.cpp index 02696227..58ed69e7 100644 --- a/unit_tests/operator/Test_ReduceMeanImpl.cpp +++ b/unit_tests/operator/Test_ReduceMeanImpl.cpp @@ -157,7 +157,7 @@ TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") { {18.25} }); - std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0); + std::shared_ptr<Node> myReduceMean = ReduceMean({}, 0); auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); op->associateInput(0,myInput); op->setDataType(DataType::Float32); @@ -179,7 +179,7 @@ TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") { {0.1293547f} }); - std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1}, 0); + std::shared_ptr<Node> myReduceMean = ReduceMean({}, 0); auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); op->associateInput(0,myInput); op->setDataType(DataType::Float32); @@ -189,5 +189,33 @@ TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") { // approxEq<float>(*(op->getOutput(0)), *myOutput); REQUIRE(approxEq<float>(*(op->getOutput(0)), *myOutput)); } + SECTION("noop_with_empty_axes") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { + { + { + { 5.0, 1.0 }, + { 20.0, 2.0 } + }, + { + { 30.0, 1.0 }, + { 40.0, 2.0 } + }, + { + { 55.0, 1.0 }, + { 60.0, 2.0 } + } + } + }); + + std::shared_ptr<Node> 
myReduceMean = ReduceMean({}, 0, 1); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + myReduceMean->forward(); + op->getOutput(0)->print(); + + REQUIRE(*(op->getOutput(0)) == *myInput); + } } } \ No newline at end of file diff --git a/unit_tests/operator/Test_ReduceSumImpl.cpp b/unit_tests/operator/Test_ReduceSumImpl.cpp index d9fe4a22..b93fc8fd 100644 --- a/unit_tests/operator/Test_ReduceSumImpl.cpp +++ b/unit_tests/operator/Test_ReduceSumImpl.cpp @@ -157,7 +157,7 @@ TEST_CASE("[cpu/operator] ReduceSum(forward)", "[ReduceSum][CPU]") { {219.0} }); - std::shared_ptr<Node> myReduceSum = ReduceSum({0, 1, 2}, 0); + std::shared_ptr<Node> myReduceSum = ReduceSum({}, 0); auto op = std::static_pointer_cast<OperatorTensor>(myReduceSum -> getOperator()); op->associateInput(0,myInput); op->setDataType(DataType::Float32); @@ -189,5 +189,33 @@ TEST_CASE("[cpu/operator] ReduceSum(forward)", "[ReduceSum][CPU]") { // approxEq<float>(*(op->getOutput(0)), *myOutput); REQUIRE(approxEq<float>(*(op->getOutput(0)), *myOutput)); } + SECTION("noop_with_empty_axes") { + std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> { + { + { + { 5.0, 1.0 }, + { 20.0, 2.0 } + }, + { + { 30.0, 1.0 }, + { 40.0, 2.0 } + }, + { + { 55.0, 1.0 }, + { 60.0, 2.0 } + } + } + }); + + std::shared_ptr<Node> myReduceSum = ReduceSum({}, 0, 1); + auto op = std::static_pointer_cast<OperatorTensor>(myReduceSum -> getOperator()); + op->associateInput(0,myInput); + op->setDataType(DataType::Float32); + op->setBackend("cpu"); + myReduceSum->forward(); + op->getOutput(0)->print(); + + REQUIRE(*(op->getOutput(0)) == *myInput); + } } } \ No newline at end of file -- GitLab