From c4e12eeea56e8ed3b612731568fc254da0abbdba Mon Sep 17 00:00:00 2001
From: Houssem Rouis <houssemeddine.rouis92@gmail.com>
Date: Fri, 27 Oct 2023 10:27:03 +0200
Subject: [PATCH] add support for same-size tensors in Div and Pow forward kernels

---
 .../aidge/backend/cpu/operator/DivImpl.hpp    |  4 +-
 .../cpu/operator/DivImpl_forward_kernels.hpp  | 22 +++++++----
 .../aidge/backend/cpu/operator/PowImpl.hpp    |  4 +-
 .../cpu/operator/PowImpl_forward_kernels.hpp  | 20 +++++++---
 src/operator/DivImpl.cpp                      |  6 +++
 src/operator/PowImpl.cpp                      |  6 +++
 unit_tests/operator/Test_DivImpl.cpp          | 38 ++++++++++++++++++-
 unit_tests/operator/Test_PowImpl.cpp          | 38 ++++++++++++++++++-
 8 files changed, 118 insertions(+), 20 deletions(-)

diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 8d42fa97..655a9f6c 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -24,10 +24,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class DivImplForward_cpu
-    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
 };
 class DivImplBackward_cpu
-    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
 };
 
 class DivImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
index 3bb3975e..01c18a66 100644
--- a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
@@ -13,13 +13,13 @@
 #define AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
 
 #include "aidge/utils/Registrar.hpp"
-#include <cmath>
-#include <iostream>
+
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
 
 namespace Aidge {
 template <class I1, class I2, class O>
-void DivImpl_cpu_forward_kernel(std::size_t inputLenght,
+void DivImpl_cpu_forward_kernel(std::size_t input1Length,
+                                     std::size_t input2Length,
                                      const void* input1_,
                                      const void* input2_,
                                      void* output_) {
@@ -27,15 +27,21 @@ void DivImpl_cpu_forward_kernel(std::size_t inputLenght,
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
-
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        //TODO: handle Div of two tensors the same size
-        output[i] = input_1[i] / input_2[0];
+    if (input2Length == input1Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] / input_2[i];
+        }
+    }
+    else if (input2Length == 1)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] / input_2[0];
+        }
     }
 }
 
 namespace {
-// TODO: add support for Div(float, int)
 static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float32(
         {DataType::Float32, DataType::Float32, DataType::Float32},
         Aidge::DivImpl_cpu_forward_kernel<float, float, float>);
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index 5ea58aa2..c33fbf0e 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -24,10 +24,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class PowImplForward_cpu
-    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
 };
 class PowImplBackward_cpu
-    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
 };
 
 class PowImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
index 0bf60dfe..0b9e2648 100644
--- a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
@@ -14,12 +14,13 @@
 
 #include "aidge/utils/Registrar.hpp"
 #include <cmath>
-#include <iostream>
+
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 
 namespace Aidge {
 template <class I1, class I2, class O>
-void PowImpl_cpu_forward_kernel(std::size_t inputLenght,
+void PowImpl_cpu_forward_kernel(std::size_t input1Length,
+                                     std::size_t input2Length,
                                      const void* input1_,
                                      const void* input2_,
                                      void* output_) {
@@ -28,14 +29,21 @@ void PowImpl_cpu_forward_kernel(std::size_t inputLenght,
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        //TODO: handle pow of two tensors the same size
-        output[i] = std::pow(input_1[i], input_2[0]);
+    if (input2Length == input1Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = std::pow(input_1[i], input_2[i]);
+        }
+    }
+    else if (input2Length == 1)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = std::pow(input_1[i], input_2[0]);
+        }
     }
 }
 
 namespace {
-// TODO: add support for pow(float, int)
 static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Float32(
         {DataType::Float32, DataType::Float32, DataType::Float32},
         Aidge::PowImpl_cpu_forward_kernel<float, float, float>);
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index e0d4cdf8..f8b7f84f 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -30,6 +30,11 @@ void Aidge::DivImpl_cpu::forward() {
     assert(mOp.getInput(0) && "missing input #0");
     assert(mOp.getInput(1) && "missing input #1");
 
+    // TODO add support for when input1 is a 1d tensor of size the channels of input0
+    assert(((mOp.getInput(1)->size() == 1) ||
+            (mOp.getInput(1)->size() == mOp.getInput(0)->size())) &&
+           "input #1 must either be a tensor of size 1 or the same size of input #0");
+
     // Find the correct kernel type
     auto kernelFunc = Registrar<DivImplForward_cpu>::create({
         mOp.getInput(0)->dataType(),
@@ -38,6 +43,7 @@ void Aidge::DivImpl_cpu::forward() {
 
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getInput(1)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
index b0b655d3..d359d40d 100644
--- a/src/operator/PowImpl.cpp
+++ b/src/operator/PowImpl.cpp
@@ -29,6 +29,11 @@ Aidge::NbElts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
 void Aidge::PowImpl_cpu::forward() {
     assert(mOp.getInput(0) && "missing input #0");
     assert(mOp.getInput(1) && "missing input #1");
+
+    // TODO add support for when input1 is a 1d tensor of size the channels of input0
+    assert(((mOp.getInput(1)->size() == 1) ||
+            (mOp.getInput(1)->size() == mOp.getInput(0)->size())) &&
+           "input #1 must either be a tensor of size 1 or the same size of input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<PowImplForward_cpu>::create({
@@ -38,6 +43,7 @@ void Aidge::PowImpl_cpu::forward() {
 
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getInput(1)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp
index 764f63c4..e3955c55 100644
--- a/unit_tests/operator/Test_DivImpl.cpp
+++ b/unit_tests/operator/Test_DivImpl.cpp
@@ -21,7 +21,7 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Div(forward)") {
-    SECTION("2D Tensor") {
+    SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
                 {0.07607108, 0.44075000},
@@ -52,6 +52,42 @@ TEST_CASE("[cpu/operator] Div(forward)") {
 
     }
 
+    SECTION("2D Tensors") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.79780143, 0.49322051},
+                {0.84239346, 0.83737719}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
+            {
+                {0.59088874, 0.78858775},
+                {0.42879432, 0.17615074}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {1.35017204, 0.62544787},
+                {1.96456301, 4.75375366}
+            }
+        });
+
+        std::shared_ptr<Node> myDiv = Div();
+        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setBackend("cpu");
+        myDiv->getOperator()->associateInput(0, input_1);
+        myDiv->getOperator()->associateInput(1, input_2);
+        myDiv->getOperator()->computeOutputDims();
+        myDiv->forward();
+
+        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
     SECTION("4D Tensor") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
             {
diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp
index 0cb04137..7ee31ddb 100644
--- a/unit_tests/operator/Test_PowImpl.cpp
+++ b/unit_tests/operator/Test_PowImpl.cpp
@@ -21,7 +21,7 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Pow(forward)") {
-    SECTION("2D Tensor") {
+    SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
                 {0.42139274, 0.51524192},
@@ -52,6 +52,42 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
 
     }
 
+    SECTION("2D Tensors") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.79780143, 0.49322051},
+                {0.84239346, 0.83737719}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
+            {
+                {0.59088874, 0.78858775},
+                {0.42879432, 0.17615074}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.87504572, 0.57271165},
+                {0.92909741, 0.96922028}
+            }
+        });
+
+        std::shared_ptr<Node> myPow = Pow();
+        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setBackend("cpu");
+        myPow->getOperator()->associateInput(0, input_1);
+        myPow->getOperator()->associateInput(1, input_2);
+        myPow->getOperator()->computeOutputDims();
+        myPow->forward();
+
+        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
     SECTION("4D Tensor") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
             {
-- 
GitLab