diff --git a/include/aidge/backend/cpu/data/Broadcasting.hpp b/include/aidge/backend/cpu/data/Broadcasting.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cb969cb54806a204072763a1672ee5266fb6347e
--- /dev/null
+++ b/include/aidge/backend/cpu/data/Broadcasting.hpp
@@ -0,0 +1,49 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_DATA_BROADCASTING_H_
+#define AIDGE_CPU_DATA_BROADCASTING_H_
+
+#include <vector>
+
+namespace Aidge {
+
+// Function to broadcast an input dims vector to the same size as an outputDims vector
+
+    /**
+     * @brief Broadcast an input dims vector to the same size as an outputDims vector
+     * @details The missing dimensions are filled with 1
+     * @param outputDims The vector of dimensions to follow
+     * @param dimsToBroadcast The vector of dimensions to broadcast
+     * @return std::vector<std::size_t> A broadcasted vector, padded with 1 on the missing dimensions.
+     */
+    std::vector<std::size_t> getBroadcastedDims(const std::vector<std::size_t>& outputDims, const std::vector<std::size_t>& dimsToBroadcast);
+
+    /**
+     * @brief Get the vector of indices along each dimension corresponding to a flattened index
+     * @param dimensions The vector of dimensions to compute the indices on
+     * @param idx The flattened index
+     * @return std::vector<std::size_t> The vector of indices along each dimension.
+     */
+    std::vector<std::size_t> getMultiDimIndices(const std::vector<std::size_t>& dimensions, std::size_t idx);
+
+    // Function to get a flattened index from multi-dimensional indices
+    /**
+     * @brief Get the flattened index into the dimensions vector corresponding to a given vector of (possibly broadcast) indices
+     * @param dimensions The vector of dimensions to compute the flattened index on
+     * @param indices The vector of indices to flatten
+     * @return std::size_t The flattened index into the dimensions vector
+     */
+    std::size_t getFlattenedIndex(const std::vector<std::size_t>& dimensions, const std::vector<std::size_t>& indices);
+
+} // namespace Aidge
+
+#endif // AIDGE_CPU_DATA_BROADCASTING_H_
\ No newline at end of file
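
Illustration only, not part of the patch: the helpers above compose for broadcasting by recovering an output coordinate with getMultiDimIndices and re-flattening it against each 1-padded input shape with getFlattenedIndex, which skips dimensions of size 1. The two local functions below simply mirror the implementations added in src/data/Broadcasting.cpp, and the shapes in main() are made up for the example.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Local mirrors of the helpers added in src/data/Broadcasting.cpp, inlined for illustration.
static std::vector<std::size_t> getMultiDimIndices(const std::vector<std::size_t>& dimensions, std::size_t idx) {
    std::vector<std::size_t> indices(dimensions.size(), 0);
    for (int i = static_cast<int>(dimensions.size()) - 1; i >= 0; --i) {
        indices[i] = idx % dimensions[i];
        idx /= dimensions[i];
    }
    return indices;
}

static std::size_t getFlattenedIndex(const std::vector<std::size_t>& dimensions, const std::vector<std::size_t>& indices) {
    std::size_t flattenedIdx = 0;
    std::size_t stride = 1;
    for (int i = static_cast<int>(dimensions.size()) - 1; i >= 0; --i) {
        flattenedIdx += (dimensions[i] > 1 ? indices[i] : 0) * stride;  // size-1 dims are broadcast
        stride *= dimensions[i];
    }
    return flattenedIdx;
}

int main() {
    // Output shape {2, 3}; input of shape {3}, right-aligned and 1-padded to {1, 3}.
    const std::vector<std::size_t> outDims = {2, 3};
    const std::vector<std::size_t> in1Dims = {1, 3};   // what getBroadcastedDims({2,3}, {3}) would return
    for (std::size_t o = 0; o < 6; ++o) {
        const std::vector<std::size_t> coord = getMultiDimIndices(outDims, o);
        // Dimension 0 has size 1 in the input, so output indices 0..5 read input 0,1,2,0,1,2.
        std::cout << "output " << o << " reads input " << getFlattenedIndex(in1Dims, coord) << "\n";
    }
    return 0;
}
```
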
diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 0299148d086ae6e2be967232e8157c6a6229b0f7..57669c628b4fa650f137c2b28c8c0a4584bf6c35 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class AddImplForward_cpu
-    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {};
+    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {};
 
 class AddImplBackward_cpu
-    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {};
+    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {};
 
 
 class AddImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
index 631ad44a562c17d41ad019a1da112dbf8a69185c..478a0226f43ccbc64d567a56ab89a558179438c5 100644
--- a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
@@ -14,12 +14,13 @@
 
 #include "aidge/utils/Registrar.hpp"
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/AddImpl.hpp"
 
 namespace Aidge {
 
 template <class I, class O>
-void AddImpl_cpu_forward_kernel(const std::size_t inputLength, const std::vector<const void*> inputs_, void* output_) {
+void AddImpl_cpu_forward_kernel(const std::vector<const void*> inputs_, const std::vector<std::vector<std::size_t>>& inputDims, const std::size_t outputLength, const std::vector<std::size_t>& outDims, void* output_) {
     // FIXME: missing Add attributes as arguments
     std::vector<const I*> inputs;
     for (const auto& input_ : inputs_) {
@@ -27,12 +28,15 @@ void AddImpl_cpu_forward_kernel(const std::size_t inputLength, const std::vector
     }
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
+    for (std::size_t oIndex = 0; oIndex < outputLength; ++oIndex) {
         output[oIndex] = 0;
-        for (std::size_t iIndex = 0; iIndex < inputs.size(); ++iIndex) {
-            output[oIndex] += inputs[iIndex][oIndex];
-        }
-    }
+        std::vector<std::size_t> indexes = getMultiDimIndices(outDims, oIndex);
+        for (std::size_t iIndex = 0; iIndex < inputs.size(); ++iIndex) {
+            std::size_t idx = getFlattenedIndex(inputDims[iIndex], indexes);
+            output[oIndex] += inputs[iIndex][idx];
+        }
+    }
 }
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 73809ee81e26fff23e40763405857ddd2c95db0c..710e288d8e0f95b69a2f4973679f1195e6d9cb6a 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -12,23 +12,24 @@
 #ifndef AIDGE_CPU_OPERATOR_DIVIMPL_H_
 #define AIDGE_CPU_OPERATOR_DIVIMPL_H_
 
+#include <memory>
+#include <tuple>
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Div.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
-// class Div_Op;
 
 // compute kernel registry for forward and backward
 class DivImplForward_cpu
-    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const std::size_t, const void*, const void*,void*)> {
 };
 class DivImplBackward_cpu
-    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 
 class DivImpl_cpu : public OperatorImpl {
@@ -40,7 +41,8 @@ public:
     }
 
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+
+    void forward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
index e2ead9ca8de3ed8328b659906336766fbfbb6a47..3cdcefa9e1c865f66b64ed527605d46af31be8af 100644
--- a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
@@ -12,42 +12,64 @@
 #ifndef AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
 #define AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
 
+#include <numeric>     // std::accumulate
+#include <cstddef>     // std::size_t
+#include <functional>  // std::multiplies
+
 #include "aidge/utils/Registrar.hpp"
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
 
 namespace Aidge {
+// template <class I1, class I2, class O>
+// void DivImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
+//                                 const std::vector<std::size_t>& input2Dims,
+//                                 const std::vector<std::size_t>& outputDims,
+//                                 const void* input1_,
+//                                 const void* input2_,
+//                                 void* output_) {
+
+//     const I1* input_1 = static_cast<const I1*>(input1_);
+//     const I2* input_2 = static_cast<const I2*>(input2_);
+//     O* output = static_cast<O*>(output_);
+
+//     const std::size_t totalElements = std::accumulate(outputDims.cbegin(), outputDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+
+// 	for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex)
+// 	{
+// 		std::vector<std::size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+
+// 		std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
+// 		std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+
+//         // TODO assert if input_2 is bad?
+//         output[oIndex] = input_1[idx1] / input_2[idx2];
+//     }
+// }
+
 template <class I1, class I2, class O>
-void DivImpl_cpu_forward_kernel(std::size_t input1Length,
-                                     std::size_t input2Length,
-                                     const void* input1_,
-                                     const void* input2_,
-                                     void* output_) {
+constexpr void DivImpl_cpu_forward_kernel(const std::size_t input1size_,
+                                const std::size_t input2size_,
+                                const std::size_t output1size_,
+                                const void* input1_,
+                                const void* input2_,
+                                void* output_) {
 
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
-    if (input2Length == input1Length)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] / input_2[i];
-        }
-    }
-    else if (input2Length == 1)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] / input_2[0];
-        }
-    }
-    else // input_2 is 1d and of size the number of channels of input_1
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            std::size_t channelIdx = i % input2Length;
-            output[i] = input_1[i] / input_2[channelIdx];
-        }
+
+    // Suppose values are contiguous in memory
+    for (std::size_t i = 0; i < output1size_; ++i) {
+        const std::size_t in1_id = (input1size_ != 1) ? i : 0;
+        const std::size_t in2_id = (input2size_ != 1) ? i : 0;
+        output[i] = static_cast<O>(input_1[in1_id] / input_2[in2_id]);
     }
 }
 
+
 namespace {
 static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float32(
         {DataType::Float32, DataType::Float32, DataType::Float32},
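
A quick illustration, not part of the patch, of the contract of the rewritten Div kernel: each call handles one contiguous span, and an input whose size is 1 for that span is broadcast across it; DivImpl_cpu::forward() (below) is responsible for cutting the full tensors into such spans. The standalone sketch mirrors the loop of the kernel above, with names and data made up for the example.

```cpp
#include <cstddef>
#include <iostream>

// Same per-span loop as DivImpl_cpu_forward_kernel<float, float, float>, written out locally.
static void div_span(std::size_t in1Size, std::size_t in2Size, std::size_t outSize,
                     const float* in1, const float* in2, float* out) {
    for (std::size_t i = 0; i < outSize; ++i) {
        const std::size_t i1 = (in1Size != 1) ? i : 0;  // size 1 -> broadcast the single value
        const std::size_t i2 = (in2Size != 1) ? i : 0;
        out[i] = in1[i1] / in2[i2];
    }
}

int main() {
    const float a[6] = {2.f, 4.f, 6.f, 8.f, 10.f, 12.f};
    const float b[1] = {2.f};   // scalar span, broadcast over the 6 outputs
    float c[6];
    div_span(6, 1, 6, a, b, c);
    for (float v : c) std::cout << v << ' ';   // prints: 1 2 3 4 5 6
    std::cout << '\n';
    return 0;
}
```
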
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
index f1b58e59b9ac1d3a1d34162a1054534830b8d508..a6f63ba284baf4cc12190d6b96a89f0baa821c95 100644
--- a/include/aidge/backend/cpu/operator/MulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class MulImplForward_cpu
-    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
 };
 class MulImplBackward_cpu
-    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 
 class MulImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
index 9caef8b88af3ca779309b60eba984a72db35f84a..e1387768ea02e2a9f35790c64c7674c321a1faa7 100644
--- a/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
@@ -14,37 +14,35 @@
 
 #include "aidge/utils/Registrar.hpp"
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
 
 namespace Aidge {
 template <class I1, class I2, class O>
-void MulImpl_cpu_forward_kernel(std::size_t input1Length,
-                                     std::size_t input2Length,
-                                     const void* input1_,
-                                     const void* input2_,
-                                     void* output_) {
+void MulImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
+                                const std::vector<std::size_t>& input2Dims,
+                                const std::vector<std::size_t>& outputDims,
+                                const void* input1_,
+                                const void* input2_,
+                                void* output_) {
 
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
-    if (input2Length == input1Length)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] * input_2[i];
-        }
-    }
-    else if (input2Length == 1)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] * input_2[0];
-        }
+
+    std::size_t totalElements = 1;
+    for (std::size_t dimSize : outputDims) {
+        totalElements *= dimSize;
     }
-    else // input_2 is 1d and of size the number of channels of input_1
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            std::size_t channelIdx = i % input2Length;
-            output[i] = input_1[i] * input_2[channelIdx];
-        }
+
+    for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) {
+        std::vector<std::size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+
+        std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
+        std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+
+        output[oIndex] = input_1[idx1] * input_2[idx2];
     }
 }
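
Suggestion only, not part of the patch: the Mul, Pow and Sub kernels now share an identical broadcast lookup loop, which could presumably be factored into a small shared helper reusing the functions from Broadcasting.hpp. The name broadcastElementwise and its signature below are hypothetical.

```cpp
#include <cstddef>
#include <vector>

#include "aidge/backend/cpu/data/Broadcasting.hpp"

// Hypothetical shared loop for the Mul/Pow/Sub broadcast kernels (sketch only).
template <class I1, class I2, class O, class F>
void broadcastElementwise(const std::vector<std::size_t>& input1Dims,
                          const std::vector<std::size_t>& input2Dims,
                          const std::vector<std::size_t>& outputDims,
                          const I1* input1, const I2* input2, O* output, F op) {
    std::size_t totalElements = 1;
    for (std::size_t dimSize : outputDims) {
        totalElements *= dimSize;
    }
    for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) {
        const std::vector<std::size_t> indexes = Aidge::getMultiDimIndices(outputDims, oIndex);
        output[oIndex] = op(input1[Aidge::getFlattenedIndex(input1Dims, indexes)],
                            input2[Aidge::getFlattenedIndex(input2Dims, indexes)]);
    }
}

// e.g. the Mul kernel body would reduce to:
//   broadcastElementwise(input1Dims, input2Dims, outputDims, input_1, input_2, output,
//                        [](I1 a, I2 b) { return static_cast<O>(a * b); });
```
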
 
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index d3cafa7e7380e31dd331950e381e08210c3f3a4c..c6e4cd36746141d7f1d1092c9bd45af41d8a9173 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class PowImplForward_cpu
-    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
 };
 class PowImplBackward_cpu
-    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 
 class PowImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
index c9c5db7e9aef07d24ba8f80c94b8f2494865e004..1146cfa77464f8bd1c33a0ec0113415dcf599b53 100644
--- a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
@@ -15,39 +15,36 @@
 #include "aidge/utils/Registrar.hpp"
 #include <cmath>
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 
 namespace Aidge {
 template <class I1, class I2, class O>
-void PowImpl_cpu_forward_kernel(std::size_t input1Length,
-                                     std::size_t input2Length,
-                                     const void* input1_,
-                                     const void* input2_,
-                                     void* output_) {
+void PowImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
+                                const std::vector<std::size_t>& input2Dims,
+                                const std::vector<std::size_t>& outputDims,
+                                const void* input1_,
+                                const void* input2_,
+                                void* output_) {
 
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
 
-    if (input2Length == input1Length)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = std::pow(input_1[i], input_2[i]);
-        }
-    }
-    else if (input2Length == 1)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = std::pow(input_1[i], input_2[0]);
-        }
-    }
-    else // input_2 is 1d and of size the number of channels of input_1
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            std::size_t channelIdx = i % input2Length;
-            output[i] = std::pow(input_1[i], input_2[channelIdx]);
-        }
+    std::size_t totalElements = 1;
+    for (std::size_t dimSize : outputDims) {
+        totalElements *= dimSize;
     }
+
+    for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) {
+        std::vector<std::size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+
+        std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
+        std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+
+        output[oIndex] = std::pow(input_1[idx1], input_2[idx2]);
+    }
 }
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp
index 2d4c22f0d7f5e850ce805e0c78fb3e64bfa8f42b..b329ec6eb0ed7f450b62cdbe289d69acf4f4edc4 100644
--- a/include/aidge/backend/cpu/operator/SubImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SubImplForward_cpu
-    : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
 };
 class SubImplBackward_cpu
-    : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 
 class SubImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp
index 08f2e24fa38d2739943279666187a55d7076a89b..19b0bd21de129ed303151987323234364ce5f6f2 100644
--- a/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp
@@ -14,39 +14,35 @@
 
 #include "aidge/utils/Registrar.hpp"
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 
+
 namespace Aidge {
 template <class I1, class I2, class O>
-void SubImpl_cpu_forward_kernel(std::size_t input1Length,
-                                     std::size_t input2Length,
-                                     const void* input1_,
-                                     const void* input2_,
-                                     void* output_) {
+void SubImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
+                                const std::vector<std::size_t>& input2Dims,
+                                const std::vector<std::size_t>& outputDims,
+                                const void* input1_,
+                                const void* input2_,
+                                void* output_) {
 
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
 
-    if (input2Length == input1Length)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] - input_2[i];
-        }
-    }
-    else if (input2Length == 1)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] - input_2[0];
-        }
-    }
-    else // input_2 is 1d and of size the number of channels of input_1
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            std::size_t channelIdx = i % input2Length;
-            output[i] = input_1[i] - input_2[channelIdx];
-        }
+    std::size_t totalElements = 1;
+    for (std::size_t dimSize : outputDims) {
+        totalElements *= dimSize;
     }
+
+    for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) {
+        std::vector<std::size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+        std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
+        std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+        output[oIndex] = input_1[idx1] - input_2[idx2];
+    }
 }
 
 namespace {
diff --git a/src/data/Broadcasting.cpp b/src/data/Broadcasting.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..22977aa772e3f3f4810a59ff1fc024cc21c66bd1
--- /dev/null
+++ b/src/data/Broadcasting.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
+
+std::vector<std::size_t> Aidge::getBroadcastedDims(const std::vector<std::size_t>& outputDims, const std::vector<std::size_t>& dimsToBroadcast){
+    std::vector<std::size_t> broadcastedDims(outputDims.size(), 1);
+    for (int j = dimsToBroadcast.size() - 1; j >= 0; --j) {
+        std::size_t idx = outputDims.size() - (dimsToBroadcast.size() - j);
+        broadcastedDims[idx] = dimsToBroadcast[j];
+    }
+    return broadcastedDims;
+}
+
+std::vector<std::size_t> Aidge::getMultiDimIndices(const std::vector<std::size_t>& dimensions, std::size_t idx){
+    std::vector<std::size_t> indices(dimensions.size(), 0);
+
+    for (int i = dimensions.size() - 1; i >= 0; --i) {
+        indices[i] = idx % dimensions[i];
+        idx /= dimensions[i];
+    }
+
+    return indices;
+}
+
+std::size_t Aidge::getFlattenedIndex(const std::vector<std::size_t>& dimensions, const std::vector<std::size_t>& indices){
+    std::size_t flattenedIdx = 0;
+    std::size_t stride = 1;
+
+    for (int i = dimensions.size() - 1; i >= 0; --i) {
+        std::size_t idx = dimensions[i]>1 ? indices[i] : 0;
+        flattenedIdx += idx * stride;
+        stride *= dimensions[i];
+    }
+    return flattenedIdx;
+}
+
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 3b53eaf3b88fb418746ab5a7a2297a15606974d3..7355ebcb3e8fb68bf74dbd1ce831bf471d285cb7 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -55,15 +55,26 @@ void  Aidge::AddImpl_cpu::forward() {
     // TODO: right now, if needed, memory will be allocated/deallocated at each
     // call to forward(). We might put the following shared_ptr as members of
     // this class to avoid that.
+    std::size_t nbDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->nbDims();
+    std::vector<std::vector<std::size_t>> inputsDims;
     std::vector<const void*> opInputs;
     std::vector<std::shared_ptr<Tensor>> inputsFallback(mOp.nbInputs());
     for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
+        std::vector<std::size_t> inputDims(nbDims, 1);
+        auto dims = std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dims();
+        for (std::size_t j = dims.size() - 1; j + 1 > 0; --j) {
+            std::size_t idx = nbDims - (dims.size() - j);
+            inputDims[idx] = dims[j];
+        }
+        inputsDims.push_back(inputDims);
         const auto& input = std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->refCastFrom(inputsFallback[i], *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
         opInputs.push_back(input.getImpl()->rawPtr());
     }
 
-    // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-               opInputs,
+    kernelFunc(opInputs,
+               inputsDims,
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
                getCPUPtr(mOp.getRawOutput(0)));
 }
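
Observation for review, not part of the patch: the manual right-alignment loop in AddImpl_cpu::forward() above produces the same 1-padded shape as the new getBroadcastedDims helper used by the other operators, so the two could likely be unified. A standalone check under that assumption, where broadcastDims is a local mirror of the helper and the shapes are made up for the example:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Local mirror of getBroadcastedDims (added in src/data/Broadcasting.cpp), for comparison only.
static std::vector<std::size_t> broadcastDims(const std::vector<std::size_t>& outputDims,
                                              const std::vector<std::size_t>& dims) {
    std::vector<std::size_t> res(outputDims.size(), 1);
    for (int j = static_cast<int>(dims.size()) - 1; j >= 0; --j) {
        res[outputDims.size() - (dims.size() - j)] = dims[j];
    }
    return res;
}

int main() {
    const std::size_t nbDims = 4;                   // output rank, as in AddImpl_cpu::forward()
    const std::vector<std::size_t> dims = {3, 2};   // an input of lower rank

    // The manual right-alignment loop from AddImpl_cpu::forward().
    std::vector<std::size_t> inputDims(nbDims, 1);
    for (std::size_t j = dims.size() - 1; j + 1 > 0; --j) {
        inputDims[nbDims - (dims.size() - j)] = dims[j];
    }

    // Both paths yield {1, 1, 3, 2}.
    assert(inputDims == broadcastDims({5, 4, 3, 2}, dims));
    return 0;
}
```
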
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index f5cde077bd5a414d8b9add8b8b8715952a27ad01..292a3b56682889051fd48b53382e5030f4e1ee50 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -9,18 +9,15 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include <memory>
 #include <vector>
 
-#include "aidge/operator/Div.hpp"
-#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
 #include "aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
 
 Aidge::NbElts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -28,16 +25,145 @@ Aidge::NbElts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
 }
 
 void Aidge::DivImpl_cpu::forward() {
+    // Find the correct kernel type
+    // auto kernelFunc = Registrar<DivImplForward_cpu>::create({
+    //     std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+    //     std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+    //     std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+    //                                                                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
+    // const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+    //                                                                std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
+
+
+    // auto a = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
+    // auto b = std::static_pointer_cast<Tensor>(mOp.getRawInput(1));
+
+    // // Call kernel
+    // kernelFunc(inputDims0,
+    //     inputDims1,
+    //     std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+    //     getCPUPtr(mOp.getRawInput(0)),
+    //     getCPUPtr(mOp.getRawInput(1)),
+    //     getCPUPtr(mOp.getRawOutput(0)));
+
+/////////////////////////////////////////////////////////////////
+
+    // [5,2,1,7] & [2,6,7]
+    // 1. Same number of dimensions -> [5,2,1,7] & [1,2,6,7]
+    // 2. Find the highest equal dimension -> 3
+    //    Exception: if the first diverging dimension is the last one, then -> 4 (dims.size())
+    // 3. Compute the highest number of contiguous data -> 7
+    // 4. Compute stride and offset step for the broadcast mechanism
+    // 5. Call a simple kernel
+
     // Find the correct kernel type
     auto kernelFunc = Registrar<DivImplForward_cpu>::create({
         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
-    // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)))->size(),
-        std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)))->size(),
-        getCPUPtr(mOp.getRawInput(0)),
-        getCPUPtr(mOp.getRawInput(1)),
-        getCPUPtr(mOp.getRawOutput(0)));
+    // Compute compatible input dimensions
+    std::vector<std::size_t>        dims0   = static_cast<const Div_Op&>(mOp).getInput(0)->dims();
+    std::vector<std::size_t>        dims1   = static_cast<const Div_Op&>(mOp).getInput(1)->dims();
+    const std::vector<std::size_t>& outDims = static_cast<const Div_Op&>(mOp).getOutput(0)->dims();
+
+    // if (dims0 == dims1) {
+    //     const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin(), dims0.cend(), std::size_t(1), std::multiplies<std::size_t>());
+    //     kernelFunc(input0_contiguous_size, input0_contiguous_size, input0_contiguous_size,
+    //                 getCPUPtr(mOp.getRawInput(0)),
+    //                 getCPUPtr(mOp.getRawInput(1)),
+    //                 getCPUPtr(mOp.getRawOutput(0)));
+    //     return;
+    // }
+
+    if (dims0.size() > dims1.size()) {
+        dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
+    }
+    else if (dims1.size() > dims0.size()) {
+        dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+    }
+
+    const std::size_t nbDims = dims0.size();
+
+    // Find the highest equal dimension
+    std::size_t contiguousIdx = nbDims - 1;
+    for (; contiguousIdx+1 > 0; --contiguousIdx) {
+        if (dims0[contiguousIdx] != dims1[contiguousIdx]) {
+            if (contiguousIdx == (nbDims -1)) {
+                if (dims0[contiguousIdx] == 1) {
+                    while ((dims0[contiguousIdx] == 1) && (contiguousIdx+1 > 0)) {
+                        --contiguousIdx;
+                    }
+                }
+                else {
+                    while ((dims1[contiguousIdx] == 1) && (contiguousIdx+1 > 0)) {
+                        --contiguousIdx;
+                    }
+                }
+            }
+            break;
+        }
+    }
+    ++contiguousIdx;
+
+    // Compute the highest number of contiguous data for each Tensor
+    const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin()+contiguousIdx, dims0.cend(), std::size_t(1), std::multiplies<std::size_t>());
+    const std::size_t input1_contiguous_size = std::accumulate(dims1.cbegin()+contiguousIdx, dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+    const std::size_t output_contiguous_size = std::accumulate(outDims.cbegin()+contiguousIdx, outDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+
+    // initialize strides to iterate through data because of broadcasting
+    std::size_t *stride_post0;
+    std::size_t *stride_post1;
+    std::int32_t *stride_step0;
+    std::int32_t *stride_step1;
+    if (contiguousIdx > 0) {
+        stride_post0 = new std::size_t[contiguousIdx];
+        stride_post0[contiguousIdx - 1] = 1;
+        stride_post1 = new std::size_t[contiguousIdx];
+        stride_post1[contiguousIdx - 1] = 1;
+        for (std::size_t i = contiguousIdx - 2; i != static_cast<std::size_t>(-1); --i) {
+            stride_post0[i] = stride_post0[i+1]*dims0[i+1];
+            stride_post1[i] = stride_post1[i+1]*dims1[i+1];
+        }
+        stride_step0 = new std::int32_t[contiguousIdx];
+        stride_step1 = new std::int32_t[contiguousIdx];
+        for (std::size_t i = 0; i != contiguousIdx; ++i) {
+            stride_step0[i] = (dims0[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post0[i]) : 1;
+            stride_step1[i] = (dims1[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post1[i]) : 1;
+        }
+    }
+
+    // variables for arrays offsets
+    std::size_t offsetIn0 = 0;
+    std::size_t offsetIn1 = 0;
+    std::size_t offsetOut = 0;
+
+
+    std::size_t dim = contiguousIdx - 1;
+    const std::size_t nbStacks = std::accumulate(outDims.cbegin(), outDims.cbegin() + contiguousIdx, std::size_t(1), std::multiplies<std::size_t>());
+    for (std::size_t stack = 0; stack < nbStacks;) {
+        kernelFunc(input0_contiguous_size, input1_contiguous_size, output_contiguous_size,
+                    getCPUPtr(mOp.getRawInput(0), offsetIn0*input0_contiguous_size),
+                    getCPUPtr(mOp.getRawInput(1), offsetIn1*input1_contiguous_size),
+                    getCPUPtr(mOp.getRawOutput(0), offsetOut*output_contiguous_size));
+        if (++stack < nbStacks) {
+            std::size_t tmp_stack = stack;
+            while(tmp_stack % outDims[dim] == 0) {
+                tmp_stack /= outDims[dim];
+                dim--;
+            }
+            offsetIn0 += stride_step0[dim];
+            offsetIn1 += stride_step1[dim];
+            ++offsetOut;
+            dim = contiguousIdx - 1;
+        }
+    }
+    if (contiguousIdx > 0) {
+        delete[] stride_post0;
+        delete[] stride_post1;
+        delete[] stride_step0;
+        delete[] stride_step1;
+    }
 }
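
Worked example, not part of the patch, for the shapes quoted in the comment at the top of forward(): [5,2,1,7] divided by [2,6,7]. The second input is padded to [1,2,6,7], the shapes agree from index 3 onward, so each kernel call covers a contiguous span of 7 values, and the pre-computed steps tell how each input offset moves (or rewinds) when an outer dimension wraps. The standalone sketch repeats just that pre-computation and prints the expected steps.

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    std::vector<std::size_t> dims0 = {5, 2, 1, 7};
    std::vector<std::size_t> dims1 = {1, 2, 6, 7};   // right-aligned and 1-padded
    const std::size_t contiguousIdx = 3;             // dims match from index 3 on -> spans of 7 values

    // Same stride pre-computation as in DivImpl_cpu::forward(), using vectors for brevity.
    std::vector<std::size_t> post0(contiguousIdx), post1(contiguousIdx);
    post0[contiguousIdx - 1] = 1;
    post1[contiguousIdx - 1] = 1;
    for (std::size_t i = contiguousIdx - 2; i != static_cast<std::size_t>(-1); --i) {
        post0[i] = post0[i + 1] * dims0[i + 1];
        post1[i] = post1[i + 1] * dims1[i + 1];
    }
    for (std::size_t i = 0; i < contiguousIdx; ++i) {
        const std::int32_t step0 = (dims0[i] == 1) ? 1 - static_cast<std::int32_t>(post0[i]) : 1;
        const std::int32_t step1 = (dims1[i] == 1) ? 1 - static_cast<std::int32_t>(post1[i]) : 1;
        // Expected: step0 = {1, 1, 0}   (input0 is repeated along its size-1 axis 2)
        //           step1 = {-11, 1, 1} (input1 rewinds by 11 spans when axis 0 wraps)
        std::cout << "dim " << i << ": step0=" << step0 << " step1=" << step1 << "\n";
    }
    return 0;
}
```
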
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
index fda49c3f20ed5cbe519d729a0bf759f0964a99fd..87d180b013e44a49cb887ce722533c50206f3889 100644
--- a/src/operator/MulImpl.cpp
+++ b/src/operator/MulImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Mul.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
@@ -34,9 +35,15 @@ void Aidge::MulImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
+    const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
+    const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
+
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+    kernelFunc(inputDims0,
+        inputDims1,
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
index 496646402e33869cfcbe7dae96e1fc81b875d0dd..22b4e27afd4e327c42be066bf7eeb6effdd8b2a9 100644
--- a/src/operator/PowImpl.cpp
+++ b/src/operator/PowImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Pow.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
@@ -34,9 +35,15 @@ void Aidge::PowImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
+    const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
+    const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
+
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+    kernelFunc(inputDims0,
+        inputDims1,
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 038a1154182ea8f359cf1b485c3de251ffbbaed5..475f8cb8704739e091f0b8f01ffce680fd851e1f 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Sub.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
@@ -35,9 +36,15 @@ void Aidge::SubImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
+    const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
+    const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
+
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+    kernelFunc(inputDims0,
+        inputDims1,
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
diff --git a/unit_tests/operator/Test_AddImpl.cpp b/unit_tests/operator/Test_AddImpl.cpp
index 740b1a5322b55e2347d93ed2e515358080a108a5..e2e7051afda5e7f72c3142987587179bc759f1e8 100644
--- a/unit_tests/operator/Test_AddImpl.cpp
+++ b/unit_tests/operator/Test_AddImpl.cpp
@@ -117,4 +117,63 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
 
         REQUIRE(*op->getOutput(0) == *expectedOutput);
     }
+
+    SECTION("Broadcasting") {
+        std::shared_ptr<Tensor> input_0 = std::make_shared<Tensor>(Array4D<int,3,1,3,2> {
+        {                                       //
+            {                                   //
+                {{0, 1},{2, 3},{4, 5}}          //
+            },                                  //
+            {                                   //
+                {{6, 7},{8, 9},{10, 11}}        //
+            },                                  //
+            {                                   //
+                {{12, 13},{14, 15},{16, 17}}    //
+            }                                   //
+        }                                       //
+        });                                     //
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+        {                                       //
+            {                                   //
+                {{20, 21},{22, 23},{24, 25}},   //
+                {{26, 27},{28, 29},{30, 31}},   //
+                {{32, 33},{34, 35},{36, 37}}    //
+            }                                   //
+        }                                       //
+        });                                     //
+
+        std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2> {{100,200}});  
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
+            {                                               //
+                {                                           //
+                    {{ 120, 222},{ 124, 226},{ 128, 230}},  //
+                    {{ 126, 228},{ 130, 232},{ 134, 236}},  //
+                    {{ 132, 234},{ 136, 238},{ 140, 242}}   //
+                },                                          //
+                {                                           //
+                    {{ 126, 228},{ 130, 232},{ 134, 236}},  //
+                    {{ 132, 234},{ 136, 238},{ 140, 242}},  //
+                    {{ 138, 240},{ 142, 244},{ 146, 248}}   //
+                },                                          //
+                {                                           //
+                    {{ 132, 234},{ 136, 238},{140, 242}},   //
+                    {{ 138, 240},{ 142, 244},{146, 248}},   //
+                    {{ 144, 246},{ 148, 250},{152, 254}}    //
+                }                                           //
+            }                                               //
+        });                                                 //
+
+        std::shared_ptr<Node> myAdd = Add(3);
+        auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
+        op->associateInput(0, input_0);
+        op->associateInput(1, input_1);
+        op->associateInput(2, input_2);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myAdd->forward();
+        op->getOutput(0)->print();
+        expectedOutput->print();
+        REQUIRE(*op->getOutput(0) == *expectedOutput);
+    }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp
index 16f69db964a092f6be87e5d983ba00694e8006f8..a0ed261fe9622f36a9bb2e46c4796ae7f6f8f5e6 100644
--- a/unit_tests/operator/Test_DivImpl.cpp
+++ b/unit_tests/operator/Test_DivImpl.cpp
@@ -10,202 +10,307 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Div.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
-#include "aidge/backend/cpu.hpp"
+namespace Aidge {
 
-#include <memory>
+TEST_CASE("[cpu/operator] Div", "[Div][CPU]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+    std::uniform_int_distribution<int> boolDist(0,1);
 
-using namespace Aidge;
+    // Create Div Operator
+    std::shared_ptr<Node> myDiv = Div();
+    auto op = std::static_pointer_cast<OperatorTensor>(myDiv-> getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0,T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
+    Tres->setDataType(DataType::Float32);
+    Tres->setBackend("cpu");
+
+    // To measure execution time of 'Div_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+
+    SECTION("DivImpl_cpu::forward()") {
+        SECTION("Scalar / Scalar") {
 
-TEST_CASE("[cpu/operator] Div(forward)", "[Div][CPU]") {
-    SECTION("2D Tensor by Singleton") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.07607108, 0.44075000},
-                {0.19494885, 0.20071143}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{0.5}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.15214217, 0.88150001},
-                {0.38989770, 0.40142286}
-            }
-        });
-
-        std::shared_ptr<Node> myDiv = Div();
-        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
-        op -> associateInput(0, input_1);
-        op -> associateInput(1, input_2);
-        op -> setDataType(DataType::Float32);
-        op -> setBackend("cpu");
-        op -> computeOutputDims();
-        myDiv -> forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
+        SECTION("Scalar / +1-D Tensor") {
 
-    }
+        }
+        SECTION("+1-D Tensor / +1-D Tensor - same dimensions") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("2D Tensors") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.79780143, 0.49322051},
-                {0.84239346, 0.83737719}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
-            {
-                {0.59088874, 0.78858775},
-                {0.42879432, 0.17615074}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {1.35017204, 0.62544787},
-                {1.96456301, 4.75375366}
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+
+                // without broadcasting
+                float* array0 = new float[nb_elements];
+                float* array1 = new float[nb_elements];
+                float* result = new float[nb_elements];
+
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                    array1[i] = valueDist(gen);
+                    result[i] = array0[i] / array1[i];
+                }
+
+                // input0
+                T0->resize(dims);
+                T0 -> getImpl() -> setRawPtr(array0, nb_elements);
+
+                // input1
+                T1->resize(dims);
+                T1 -> getImpl() -> setRawPtr(array1, nb_elements);
+
+                // results
+                Tres->resize(dims);
+                Tres -> getImpl() -> setRawPtr(result, nb_elements);
+
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myDiv->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                // with broadcasting
             }
-        });
-
-        std::shared_ptr<Node> myDiv = Div();
-        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
-        op -> associateInput(0, input_1);
-        op -> associateInput(1, input_2);
-        op -> setDataType(DataType::Float32);
-        op -> setBackend("cpu");
-        op -> computeOutputDims();
-        myDiv->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
 
-    }
+        SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("3D Tensor by 1D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.24180168, 0.44319558, 0.06437260},
-                 {0.21270001, 0.34570599, 0.44151264}},
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions, replace some dimensions with '1' to get broadcasting
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                std::vector<std::size_t> dims0 = dims;
+                std::vector<std::size_t> dims1 = dims;
+                std::vector<std::size_t> dimsOut = dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims0[i] = 1;
+                    }
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                    dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+                }
 
-                {{0.62294692, 0.98043168, 0.18628585},
-                 {0.33591706, 0.03432965, 0.32130069}}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
-            {0.63475525, 0.58620811, 0.69340748}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.38093686, 0.75603795, 0.09283517},
-                 {0.33508980, 0.58973253, 0.63672900}},
-
-                {{0.98139703, 1.67249763, 0.26865280},
-                 {0.52920723, 0.05856223, 0.46336490}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+                    array1[i] = valueDist(gen);
+                }
+
+                // compute true result
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                                    + ((dims1[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] / array1[idx1];
+                                // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " / " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1 -> getImpl() -> setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myDiv->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myDiv = Div();
-        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
-        op -> associateInput(0, input_1);
-        op -> associateInput(1, input_2);
-        op -> setDataType(DataType::Float32);
-        op -> setBackend("cpu");
-        op -> computeOutputDims();
-        myDiv->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 12; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
+        SECTION("+1-D Tensor / 1-D Tensor") {
+            std::size_t number_of_operation = 0;
+            std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3));
 
-    }
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims0(4);
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims0[i] = dimSizeDist(gen);
+                }
+                std::vector<std::size_t> dimsOut = dims0;
+                std::vector<std::size_t> dims1 = dims0;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                }
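+                // drop 1 to 3 leading dimensions so that input 1 has a lower rank than input 0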
+                dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen));
 
-    SECTION("4D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
-            {
-                {
-                    {{0.25675946, 0.36265653, 0.22386390},
-                     {0.30483031, 0.97449398, 0.73871714},
-                     {0.36169255, 0.04510212, 0.27525920}},
-
-                    {{0.73255682, 0.03885978, 0.24181491},
-                    {0.14465559, 0.86070061, 0.88848090},
-                    {0.74408931, 0.87412918, 0.19800508}},
-
-                    {{0.43551809, 0.73437816, 0.37513995},
-                     {0.25414777, 0.06396711, 0.98708153},
-                     {0.02140611, 0.84974837, 0.62108254}}
-                },
-                {
-                    {{0.86227137, 0.69357753, 0.41814715},
-                     {0.76048166, 0.46306920, 0.05907208},
-                     {0.76625377, 0.91793799, 0.92988223}},
-
-                    {{0.34362513, 0.85009813, 0.21107805},
-                     {0.65575773, 0.38140792, 0.48540717},
-                     {0.10045588, 0.85803932, 0.23778951}},
-
-                    {{0.30316389, 0.04176688, 0.17290735},
-                     {0.07942408, 0.48647392, 0.39440966},
-                     {0.26543915, 0.92589515, 0.83948994}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                float* array1 = new float[array1_size];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) {
+                    array0[i] = valueDist(gen);
                 }
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{3.0}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
-            {
-                {
-                    {{0.08558649, 0.12088551, 0.07462130},
-                     {0.10161010, 0.32483134, 0.24623905},
-                     {0.12056419, 0.01503404, 0.09175307}},
-
-                    {{0.24418561, 0.01295326, 0.08060497},
-                     {0.04821853, 0.28690019, 0.29616031},
-                     {0.24802977, 0.29137638, 0.06600169}},
-
-                    {{0.14517270, 0.24479271, 0.12504666},
-                     {0.08471593, 0.02132237, 0.32902718},
-                     {0.00713537, 0.28324947, 0.20702751}}
-                },
-                {
-                    {{0.28742379, 0.23119251, 0.13938238},
-                     {0.25349388, 0.15435641, 0.01969069},
-                     {0.25541791, 0.30597934, 0.30996075}},
-
-                    {{0.11454171, 0.28336605, 0.07035935},
-                     {0.21858591, 0.12713598, 0.16180240},
-                     {0.03348529, 0.28601310, 0.07926317}},
-
-                    {{0.10105463, 0.01392229, 0.05763578},
-                     {0.02647469, 0.16215797, 0.13146989},
-                     {0.08847972, 0.30863172, 0.27982998}}
+                for (std::size_t i = 0; i < array1_size; ++i) {
+                    array1[i] = valueDist(gen);
                 }
+
+                // compute true result
+                auto dims1_tmp = dims1;
+                dims1_tmp.insert(dims1_tmp.cbegin(), 4 - dims1_tmp.size(), std::size_t(1));
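+                // dims1 is padded on the left with 1s back to rank 4, so the reference computation
+                // can reuse the same stride/clamping scheme as the full-rank broadcasting case above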
+
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0)
+                                                    + ((dims1_tmp[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] / array1[idx1];
+                                // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " / " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1 -> getImpl() -> setRawPtr(array1, array1_size);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myDiv->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myDiv = Div();
-        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
-        op -> associateInput(0, input_1);
-        op -> associateInput(1, input_2);
-        op -> setDataType(DataType::Float32);
-        op -> setBackend("cpu");
-        op -> computeOutputDims();
-        myDiv->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 54; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
     }
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp
index 1707bc81e0bb549bfe90078242f8a4eae77db3c3..5b5a05764ecb0298a08c3e9ceece448d46e63044 100644
--- a/unit_tests/operator/Test_MulImpl.cpp
+++ b/unit_tests/operator/Test_MulImpl.cpp
@@ -10,123 +10,307 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Mul.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
-#include "aidge/backend/cpu.hpp"
+namespace Aidge {
 
-#include <memory>
+TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+    std::uniform_int_distribution<int> boolDist(0,1);
 
-using namespace Aidge;
+    // Create Mul Operator
+    std::shared_ptr<Node> myMul = Mul();
+    auto op = std::static_pointer_cast<OperatorTensor>(myMul-> getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0,T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
+    Tres->setDataType(DataType::Float32);
+    Tres->setBackend("cpu");
+
+    // To measure execution time of 'Mul_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+
+    SECTION("MulImpl_cpu::forward()") {
+        SECTION("Scalar / Scalar") {
 
-TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
-    SECTION("2D Tensor by Singleton") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.38977361, 0.34064174},
-                {0.00427264, 0.90872520}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{3.0}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {1.16932082, 1.02192521},
-                {0.01281792, 2.72617555}
-            }
-        });
-
-        std::shared_ptr<Node> myMul = Mul();
-        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
-        myMul->getOperator()->associateInput(0, input_1);
-        myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->setDataType(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        myMul->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
+        SECTION("Scalar / +1-D Tensor") {
 
-    }
+        }
+        SECTION("+1-D Tensor / +1-D Tensor - same dimensions") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("2D Tensors") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.38977361, 0.34064174},
-                {0.00427264, 0.90872520}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
-            {
-                {0.02362096, 0.24084556},
-                {0.94690859, 0.13512510}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.00920683, 0.08204205},
-                {0.00404580, 0.12279158}
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+
+                // without broadcasting
+                float* array0 = new float[nb_elements];
+                float* array1 = new float[nb_elements];
+                float* result = new float[nb_elements];
+
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                    array1[i] = valueDist(gen);
+                    result[i] = array0[i] * array1[i];
+                }
+
+                // input0
+                T0->resize(dims);
+                T0 -> getImpl() -> setRawPtr(array0, nb_elements);
+
+                // input1
+                T1->resize(dims);
+                T1 -> getImpl() -> setRawPtr(array1, nb_elements);
+
+                // results
+                Tres->resize(dims);
+                Tres -> getImpl() -> setRawPtr(result, nb_elements);
+
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myMul->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
             }
-        });
-
-        std::shared_ptr<Node> myMul = Mul();
-        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
-        myMul->getOperator()->associateInput(0, input_1);
-        myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->setDataType(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        myMul->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
 
-    }
+        SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("3D Tensor by 1D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.33647752, 0.89360154, 0.46586215},
-                 {0.71518236, 0.71481097, 0.97991812}},
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions, replace some dimensions with '1' to get broadcasting
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                std::vector<std::size_t> dims0 = dims;
+                std::vector<std::size_t> dims1 = dims;
+                std::vector<std::size_t> dimsOut = dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims0[i] = 1;
+                    }
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                    dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+                }
 
-                {{0.17393428, 0.56849813, 0.18489265},
-                 {0.78397650, 0.00348300, 0.65758008}}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
-            {0.15380561, 0.51063120, 0.93031412}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.05175213, 0.45630082, 0.43339813},
-                 {0.10999906, 0.36500478, 0.91163164}},
-
-                {{0.02675207, 0.29029289, 0.17200825},
-                 {0.12057999, 0.00177853, 0.61175603}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+                    array1[i] = valueDist(gen);
+                }
+
+                // compute true result
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
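+                // Reference result: for every output coordinate (a, b, c, d), the index into each
+                // input clamps a coordinate to 0 on any axis whose dimension is 1, so that axis is
+                // broadcast (e.g. if dims0[1] == 1, every b maps to offset 0 along that axis).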
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                                    + ((dims1[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] * array1[idx1];
+                                // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " * " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1 -> getImpl() -> setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myMul->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myMul = Mul();
-        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
-        myMul->getOperator()->associateInput(0, input_1);
-        myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->setDataType(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        myMul->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 12; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
+        SECTION("+1-D Tensor / 1-D Tensor") {
+            std::size_t number_of_operation = 0;
+            std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3));
+
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims0(4);
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims0[i] = dimSizeDist(gen);
+                }
+                std::vector<std::size_t> dimsOut = dims0;
+                std::vector<std::size_t> dims1 = dims0;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                }
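+                // drop 1 to 3 leading dimensions so that input 1 has a lower rank than input 0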
+                dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen));
+
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                float* array1 = new float[array1_size];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < array1_size; ++i) {
+                    array1[i] = valueDist(gen);
+                }
 
+                // compute true result
+                auto dims1_tmp = dims1;
+                dims1_tmp.insert(dims1_tmp.cbegin(), 4 - dims1_tmp.size(), std::size_t(1));
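+                // dims1 is padded on the left with 1s back to rank 4, so the reference computation
+                // can reuse the same stride/clamping scheme as the full-rank broadcasting case above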
+
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0)
+                                                    + ((dims1_tmp[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] * array1[idx1];
+                                // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " * " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1 -> getImpl() -> setRawPtr(array1, array1_size);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myMul->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+            }
+
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
+        }
     }
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp
index 0c95e785958aca72b5ae1f5727134552310e5bef..01f9760275923b2249e5b6098b83b4ae27d5fb30 100644
--- a/unit_tests/operator/Test_PowImpl.cpp
+++ b/unit_tests/operator/Test_PowImpl.cpp
@@ -10,198 +10,308 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <cmath>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Pow.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
-#include "aidge/backend/cpu.hpp"
+namespace Aidge {
 
-#include <memory>
+TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+    std::uniform_int_distribution<int> boolDist(0,1);
 
-using namespace Aidge;
+    // Create Pow Operator
+    std::shared_ptr<Node> myPow = Pow();
+    auto op = std::static_pointer_cast<OperatorTensor>(myPow-> getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0,T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
+    Tres->setDataType(DataType::Float32);
+    Tres->setBackend("cpu");
+
+    // To measure execution time of 'Pow_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+
+    SECTION("PowImpl_cpu::forward()") {
+        SECTION("Scalar / Scalar") {
 
-TEST_CASE("[cpu/operator] Pow(forward)", "[Pow][CPU]") {
-    SECTION("2D Tensor by Singleton") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.42139274, 0.51524192},
-                {0.85247433, 0.13432795}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.17757183, 0.26547423},
-                {0.72671247, 0.01804400}
-            }
-        });
-
-        std::shared_ptr<Node> myPow = Pow();
-        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
-        op->associateInput(0, input_1);
-        op->associateInput(1, input_2);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myPow->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
+        SECTION("Scalar / +1-D Tensor") {
 
-    }
+        }
+        SECTION("+1-D Tensor / +1-D Tensor - same dimensions") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("3D Tensor by 1D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.87519985, 0.10536593, 0.20268351},
-                 {0.75532353, 0.95977652, 0.03897029}},
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
 
-                {{0.67554104, 0.35499334, 0.27741563},
-                 {0.94270861, 0.48397779, 0.35532343}}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
-            {0.39333701, 0.08719915, 0.16713941}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.94891787, 0.82182676, 0.76584703},
-                 {0.89549923, 0.99642646, 0.58137459}},
-
-                {{0.85702944, 0.91364944, 0.80709606},
-                 {0.97706109, 0.93867886, 0.84118503}}
+                // without broadcasting
+                float* array0 = new float[nb_elements];
+                float* array1 = new float[nb_elements];
+                float* result = new float[nb_elements];
+
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                    array1[i] = valueDist(gen);
+                    result[i] = std::pow(array0[i], array1[i]);
+                }
+
+                // input0
+                T0->resize(dims);
+                T0 -> getImpl() -> setRawPtr(array0, nb_elements);
+
+                // input1
+                T1->resize(dims);
+                T1 -> getImpl() -> setRawPtr(array1, nb_elements);
+
+                // results
+                Tres->resize(dims);
+                Tres -> getImpl() -> setRawPtr(result, nb_elements);
+
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myPow->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
             }
-        });
-
-        std::shared_ptr<Node> myPow = Pow();
-        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
-        op->associateInput(0, input_1);
-        op->associateInput(1, input_2);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myPow->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 12; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
 
-    }
+        SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("2D Tensors") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.79780143, 0.49322051},
-                {0.84239346, 0.83737719}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
-            {
-                {0.59088874, 0.78858775},
-                {0.42879432, 0.17615074}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.87504572, 0.57271165},
-                {0.92909741, 0.96922028}
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions, replace some dimensions with '1' to get broadcasting
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                std::vector<std::size_t> dims0 = dims;
+                std::vector<std::size_t> dims1 = dims;
+                std::vector<std::size_t> dimsOut = dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims0[i] = 1;
+                    }
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                    dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+                }
+
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+                    array1[i] = valueDist(gen);
+                }
+
+                // compute true result
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
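+                // Reference result: for every output coordinate (a, b, c, d), the index into each
+                // input clamps a coordinate to 0 on any axis whose dimension is 1, so that axis is
+                // broadcast (e.g. if dims0[1] == 1, every b maps to offset 0 along that axis).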
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                                    + ((dims1[3] > 1) ? d : 0);
+                                result[idx_out + d] = std::pow(array0[idx0], array1[idx1]);
+                                // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " ** " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1 -> getImpl() -> setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myPow->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myPow = Pow();
-        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
-        op->associateInput(0, input_1);
-        op->associateInput(1, input_2);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myPow->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
+        SECTION("+1-D Tensor / 1-D Tensor") {
+            std::size_t number_of_operation = 0;
+            std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3));
 
-    }
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims0(4);
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims0[i] = dimSizeDist(gen);
+                }
+                std::vector<std::size_t> dimsOut = dims0;
+                std::vector<std::size_t> dims1 = dims0;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                }
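+                // drop 1 to 3 leading dimensions so that input 1 has a lower rank than input 0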
+                dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen));
 
-    SECTION("4D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
-            {
-                {
-                    {{0.80191749, 0.45388508, 0.86550850},
-                     {0.47226250, 0.55809456, 0.59451854},
-                     {0.45497441, 0.02653158, 0.44041735}},
-                    {{0.30726379, 0.73146582, 0.46462774},
-                     {0.30268502, 0.78075552, 0.65154958},
-                     {0.91332406, 0.62448132, 0.53238851}},
-                    {{0.13917381, 0.43061519, 0.30198061},
-                     {0.12880909, 0.08995515, 0.29609048},
-                     {0.46449280, 0.47559714, 0.24193990}}
-                },
-                {
-                    {{0.87349969, 0.51625526, 0.16921073},
-                     {0.95035923, 0.10066575, 0.56729180},
-                     {0.84686232, 0.05965143, 0.03635806}},
-                    {{0.61107808, 0.59954077, 0.45627308},
-                     {0.84114522, 0.77186388, 0.37427086},
-                     {0.13415480, 0.00617349, 0.84260136}},
-                    {{0.55090177, 0.57292056, 0.29158932},
-                     {0.67131883, 0.96988875, 0.69545972},
-                     {0.80979776, 0.18238151, 0.19527155}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                float* array1 = new float[array1_size];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) {
+                    array0[i] = valueDist(gen);
                 }
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
-            {
-                {
-                    {{6.43071651e-01, 2.06011668e-01, 7.49104977e-01},
-                     {2.23031864e-01, 3.11469525e-01, 3.53452295e-01},
-                     {2.07001716e-01, 7.03924568e-04, 1.93967447e-01}},
-
-                    {{9.44110379e-02, 5.35042226e-01, 2.15878934e-01},
-                     {9.16182250e-02, 6.09579206e-01, 4.24516857e-01},
-                     {8.34160864e-01, 3.89976919e-01, 2.83437520e-01}},
-
-                    {{1.93693489e-02, 1.85429439e-01, 9.11922902e-02},
-                     {1.65917836e-02, 8.09192937e-03, 8.76695737e-02},
-                     {2.15753555e-01, 2.26192638e-01, 5.85349165e-02}}
-                },
-                {
-                    {{7.63001740e-01, 2.66519487e-01, 2.86322720e-02},
-                     {9.03182685e-01, 1.01335924e-02, 3.21819991e-01},
-                     {7.17175782e-01, 3.55829368e-03, 1.32190844e-03}},
-
-                    {{3.73416424e-01, 3.59449148e-01, 2.08185121e-01},
-                     {7.07525253e-01, 5.95773816e-01, 1.40078679e-01},
-                     {1.79975089e-02, 3.81119971e-05, 7.09977031e-01}},
-
-                    {{3.03492755e-01, 3.28237981e-01, 8.50243345e-02},
-                     {4.50668961e-01, 9.40684199e-01, 4.83664215e-01},
-                     {6.55772448e-01, 3.32630165e-02, 3.81309800e-02}}
+                for (std::size_t i = 0; i < array1_size; ++i) {
+                    array1[i] = valueDist(gen);
                 }
+
+                // compute true result
+                auto dims1_tmp = dims1;
+                dims1_tmp.insert(dims1_tmp.cbegin(), 4 - dims1_tmp.size(), std::size_t(1));
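+                // dims1 is padded on the left with 1s back to rank 4, so the reference computation
+                // can reuse the same stride/clamping scheme as the full-rank broadcasting case above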
+
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0)
+                                                    + ((dims1_tmp[3] > 1) ? d : 0);
+                                result[idx_out + d] = std::pow(array0[idx0], array1[idx1]);
+                                // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " ** " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1 -> getImpl() -> setRawPtr(array1, array1_size);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myPow->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myPow = Pow();
-        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
-        op->associateInput(0, input_1);
-        op->associateInput(1, input_2);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myPow->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 54; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
     }
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_SubImpl.cpp b/unit_tests/operator/Test_SubImpl.cpp
index dfd64078b77a557e07eb11cb958ac24eeb1f9aa3..f9ba894f081b76b3abd0f0909636a38eaee3601a 100644
--- a/unit_tests/operator/Test_SubImpl.cpp
+++ b/unit_tests/operator/Test_SubImpl.cpp
@@ -10,123 +10,307 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Sub.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
-#include "aidge/backend/cpu.hpp"
+namespace Aidge {
 
-#include <memory>
+TEST_CASE("[cpu/operator] Sub", "[Sub][CPU]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+    std::uniform_int_distribution<int> boolDist(0,1);
 
-using namespace Aidge;
+    // Create Sub Operator
+    std::shared_ptr<Node> mySub = Sub();
+    auto op = std::static_pointer_cast<OperatorTensor>(mySub-> getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0,T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
+    Tres->setDataType(DataType::Float32);
+    Tres->setBackend("cpu");
+
+    // To measure execution time of 'Sub_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+
+    SECTION("SubImpl_cpu::forward()") {
+        SECTION("Scalar / Scalar") {
 
-TEST_CASE("[cpu/operator] Sub(forward)", "[Sub][CPU]") {
-    SECTION("2D Tensor by Singleton") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.34234560, 0.92812711},
-                {0.73706615, 0.69953883}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{2.5}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {-2.15765429, -1.57187295},
-                {-1.76293385, -1.80046117}
-            }
-        });
-
-        std::shared_ptr<Node> mySub = Sub();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
-        mySub->getOperator()->associateInput(0, input_1);
-        mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->setDataType(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        mySub->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
+        SECTION("Scalar / +1-D Tensor") {
 
-    }
+        }
+        SECTION("+1-D Tensor / +1-D Tensor - same dimensions") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("2D Tensors") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.34234560, 0.92812711},
-                {0.73706615, 0.69953883}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
-            {
-                {0.61729127, 0.83004373},
-                {0.72002089, 0.52473849}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {-0.27494568,  0.09808338},
-                {0.01704526,  0.17480034}
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
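+                // number of elements per tensor; also accumulated for the throughput report printed after the trials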
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+
+                // without broadcasting: both inputs share the same dimensions
+                float* array0 = new float[nb_elements];
+                float* array1 = new float[nb_elements];
+                float* result = new float[nb_elements];
+
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                    array1[i] = valueDist(gen);
+                    result[i] = array0[i] - array1[i];
+                }
+
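+                // wrap the raw buffers into the Aidge Tensors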
+                // input0
+                T0->resize(dims);
+                T0->getImpl()->setRawPtr(array0, nb_elements);
+
+                // input1
+                T1->resize(dims);
+                T1->getImpl()->setRawPtr(array1, nb_elements);
+
+                // results
+                Tres->resize(dims);
+                Tres->getImpl()->setRawPtr(result, nb_elements);
+
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                mySub->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                // broadcasting cases are covered in the dedicated SECTIONs below
             }
-        });
-
-        std::shared_ptr<Node> mySub = Sub();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
-        mySub->getOperator()->associateInput(0, input_1);
-        mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->setDataType(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        mySub->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count()) << std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
 
-    }
+        SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("3D Tensor by 1D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.84181279, 0.20655948, 0.09750116},
-                 {0.37723488, 0.73120135, 0.04666907}},
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions, replace some dimensions with '1' to get broadcasting
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                std::vector<std::size_t> dims0 = dims;
+                std::vector<std::size_t> dims1 = dims;
+                std::vector<std::size_t> dimsOut = dims;
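+                // randomly collapse some dimensions of each input to 1; the output keeps the
+                // non-collapsed size, following the usual broadcasting rule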
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims0[i] = 1;
+                    }
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                    dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+                }
 
-                {{0.91483921, 0.93985939, 0.58823180},
-                 {0.39963132, 0.67879969, 0.33209187}}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
-            {0.04784805, 0.91903114, 0.38606840}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.79396474, -0.71247166, -0.28856725},
-                 {0.32938683, -0.18782979, -0.33939934}},
-
-                {{0.86699116,  0.02082825,  0.20216340},
-                 {0.35178328, -0.24023145, -0.05397654}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+                    array1[i] = valueDist(gen);
+                }
+
+                // compute true result
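+                // row-major strides of each input; a broadcast dimension (size 1) always maps
+                // to index 0 so its single value is reused along that axis of the output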
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                                    + ((dims1[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] - array1[idx1];
+                                // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " - " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1->getImpl()->setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres->getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                mySub->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> mySub = Sub();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
-        mySub->getOperator()->associateInput(0, input_1);
-        mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->setDataType(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        mySub->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 12; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count()) << std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
+        SECTION("+1-D Tensor / 1-D Tensor") {
+            std::size_t number_of_operation = 0;
+            std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3));
+
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims0(nbDims);
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims0[i] = dimSizeDist(gen);
+                }
+                std::vector<std::size_t> dimsOut = dims0;
+                std::vector<std::size_t> dims1 = dims0;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                }
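+                // drop 1 to 3 leading dimensions so that input1 has a lower rank than input0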
+                dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen));
+
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                float* array1 = new float[array1_size];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < array1_size; ++i) {
+                    array1[i] = valueDist(gen);
+                }
 
+                // compute true result
+                auto dims1_tmp = dims1;
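+                // left-pad dims1 with 1s up to nbDims so the same stride-based indexing applies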
+                dims1_tmp.insert(dims1_tmp.cbegin(), nbDims - dims1_tmp.size(), std::size_t(1));
+
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0)
+                                                    + ((dims1_tmp[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] - array1[idx1];
+                                // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " - " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1->getImpl()->setRawPtr(array1, array1_size);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres->getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                mySub->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+            }
+
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count()) << std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
+        }
     }
-}
\ No newline at end of file
+}
+} // namespace Aidge