diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 8d42fa977ca834bb9995a814e3dad8a0533e2bd1..655a9f6c8accb80fc85d8bc7bd9bf378d4f48a6b 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -24,10 +24,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class DivImplForward_cpu
-    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
 };
 class DivImplBackward_cpu
-    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
 };
 
 class DivImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
index 3bb3975e5ac45f3a1d2aeedd1c86491e25f8e950..01c18a66d379f2549eb0ec591623b442a7633496 100644
--- a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
@@ -13,13 +13,13 @@
 #define AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
 
 #include "aidge/utils/Registrar.hpp"
-#include <cmath>
-#include <iostream>
+
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
 
 namespace Aidge {
 template <class I1, class I2, class O>
-void DivImpl_cpu_forward_kernel(std::size_t inputLenght,
+void DivImpl_cpu_forward_kernel(std::size_t input1Length,
+                                     std::size_t input2Length,
                                      const void* input1_,
                                      const void* input2_,
                                      void* output_) {
@@ -27,15 +27,21 @@ void DivImpl_cpu_forward_kernel(std::size_t inputLenght,
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
-
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        //TODO: handle Div of two tensors the same size
-        output[i] = input_1[i] / input_2[0];
+    if (input2Length == input1Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] / input_2[i];
+        }
+    }
+    else if (input2Length == 1)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] / input_2[0];
+        }
     }
 }
 
 namespace {
-// TODO: add support for Div(float, int)
 static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float32(
         {DataType::Float32, DataType::Float32, DataType::Float32},
         Aidge::DivImpl_cpu_forward_kernel<float, float, float>);
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index 5ea58aa2e51a8eea034dbebb71adefecfdcb336f..c33fbf0ed4adf4a0206ce8ed32ffdce2cd9ad17c 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -24,10 +24,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class PowImplForward_cpu
-    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
 };
 class PowImplBackward_cpu
-    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
 };
 
 class PowImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
index 0bf60dfea62057bfaac7e78c9cefe380285833a3..0b9e26485fb29bce932669628d3549da3307ede0 100644
--- a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
@@ -14,12 +14,13 @@
 
 #include "aidge/utils/Registrar.hpp"
 #include <cmath>
-#include <iostream>
+
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 
 namespace Aidge {
 template <class I1, class I2, class O>
-void PowImpl_cpu_forward_kernel(std::size_t inputLenght,
+void PowImpl_cpu_forward_kernel(std::size_t input1Length,
+                                     std::size_t input2Length,
                                      const void* input1_,
                                      const void* input2_,
                                      void* output_) {
@@ -28,14 +29,21 @@ void PowImpl_cpu_forward_kernel(std::size_t inputLenght,
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        //TODO: handle pow of two tensors the same size
-        output[i] = std::pow(input_1[i], input_2[0]);
+    if (input2Length == input1Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = std::pow(input_1[i], input_2[i]);
+        }
+    }
+    else if (input2Length == 1)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = std::pow(input_1[i], input_2[0]);
+        }
     }
 }
 
 namespace {
-// TODO: add support for pow(float, int)
 static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Float32(
         {DataType::Float32, DataType::Float32, DataType::Float32},
         Aidge::PowImpl_cpu_forward_kernel<float, float, float>);
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index e0d4cdf8e31f219a02505bbb1ae0c212db80a085..f8b7f84f03ad1315f1aa8906de07e80eb79a2bbd 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -30,6 +30,11 @@ void Aidge::DivImpl_cpu::forward() {
     assert(mOp.getInput(0) && "missing input #0");
     assert(mOp.getInput(1) && "missing input #1");
 
+    // TODO add support for when input1 is a 1d tensor of size the channels of input0
+    assert(((mOp.getInput(1)->size() == 1) ||
+            (mOp.getInput(1)->size() == mOp.getInput(0)->size())) &&
+           "input #1 must either be a tensor of size 1 or the same size of input #0");
+
     // Find the correct kernel type
     auto kernelFunc = Registrar<DivImplForward_cpu>::create({
         mOp.getInput(0)->dataType(),
@@ -38,6 +43,7 @@ void Aidge::DivImpl_cpu::forward() {
 
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getInput(1)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
index b0b655d39b3b23935ac7dc72971a72e4c219f1bc..d359d40d8d66675c278af8cf105b5adfe47f41e6 100644
--- a/src/operator/PowImpl.cpp
+++ b/src/operator/PowImpl.cpp
@@ -29,6 +29,11 @@ Aidge::NbElts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
 void Aidge::PowImpl_cpu::forward() {
     assert(mOp.getInput(0) && "missing input #0");
     assert(mOp.getInput(1) && "missing input #1");
+
+    // TODO add support for when input1 is a 1d tensor of size the channels of input0
+    assert(((mOp.getInput(1)->size() == 1) ||
+            (mOp.getInput(1)->size() == mOp.getInput(0)->size())) &&
+           "input #1 must either be a tensor of size 1 or the same size of input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<PowImplForward_cpu>::create({
@@ -38,6 +43,7 @@ void Aidge::PowImpl_cpu::forward() {
 
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getInput(1)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp
index 764f63c43a210009ca1b4a12f0acb3e326c8ba9b..e3955c556c3dfd286dcaf60c33daf1980e63a186 100644
--- a/unit_tests/operator/Test_DivImpl.cpp
+++ b/unit_tests/operator/Test_DivImpl.cpp
@@ -21,7 +21,7 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Div(forward)") {
-    SECTION("2D Tensor") {
+    SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
                 {0.07607108, 0.44075000},
@@ -52,6 +52,42 @@ TEST_CASE("[cpu/operator] Div(forward)") {
 
     }
 
+    SECTION("2D Tensors") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.79780143, 0.49322051},
+                {0.84239346, 0.83737719}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
+            {
+                {0.59088874, 0.78858775},
+                {0.42879432, 0.17615074}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {1.35017204, 0.62544787},
+                {1.96456301, 4.75375366}
+            }
+        });
+
+        std::shared_ptr<Node> myDiv = Div();
+        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setBackend("cpu");
+        myDiv->getOperator()->associateInput(0, input_1);
+        myDiv->getOperator()->associateInput(1, input_2);
+        myDiv->getOperator()->computeOutputDims();
+        myDiv->forward();
+
+        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
     SECTION("4D Tensor") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
             {
diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp
index 0cb04137644be78a09b179fd1a7a71f96c2d4903..7ee31ddb12926764789117dcb1bd2699d4157717 100644
--- a/unit_tests/operator/Test_PowImpl.cpp
+++ b/unit_tests/operator/Test_PowImpl.cpp
@@ -21,7 +21,7 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Pow(forward)") {
-    SECTION("2D Tensor") {
+    SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
                 {0.42139274, 0.51524192},
@@ -52,6 +52,42 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
 
     }
 
+    SECTION("2D Tensors") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.79780143, 0.49322051},
+                {0.84239346, 0.83737719}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
+            {
+                {0.59088874, 0.78858775},
+                {0.42879432, 0.17615074}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.87504572, 0.57271165},
+                {0.92909741, 0.96922028}
+            }
+        });
+
+        std::shared_ptr<Node> myPow = Pow();
+        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setBackend("cpu");
+        myPow->getOperator()->associateInput(0, input_1);
+        myPow->getOperator()->associateInput(1, input_2);
+        myPow->getOperator()->computeOutputDims();
+        myPow->forward();
+
+        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
     SECTION("4D Tensor") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
             {