diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index 18963ced1084c56c1e4c04dceec735126bba962a..39d5a378b7ed94b0455ad5cd36464b180c52c535 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -143,72 +143,72 @@ build:ubuntu_python:
     paths:
       - venv/
 
-build:windows_cpp:
-  stage: build
-  needs: []
-  tags:
-    - windows
-
-  image: buildtools
-  before_script:
-    # Install Chocolatey
-    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-    # Install dependencies
-    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-    - choco install git -Y
-    - choco install python -Y
-    # Update PATH
-    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  script:
-    # Download dependencies
-    # aidge_core
-    - $DEPENDENCY_NAME="aidge_core"
-    - $DEPENDENCY_JOB="build:windows_cpp"
-    - !reference [.download_dependency_windows, script]
-    - Remove-Item .\build_cpp\ -Recurse -Force -ErrorAction Ignore
-
-    - $env:CMAKE_PREFIX_PATH = '../install_cpp'
-    - mkdir -p build_cpp
-    - cd build_cpp
-    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
-    - cmake --build . -j2
-    - cmake --install . --config Debug
-
-  artifacts:
-    expire_in: 1 week
-    paths:
-      - build_cpp/
-      - install_cpp/
-
-build:windows_python:
-  stage: build
-  needs: []
-  tags:
-    - windows
-
-  image: buildtools
-  before_script:
-    # Install Chocolatey
-    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-    # Install dependencies
-    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-    - choco install git -Y
-    - choco install python -Y
-    # Update PATH
-    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  script:
-    # Download dependencies
-    # aidge_core (Python)
-    - $DEPENDENCY_NAME="aidge_core"
-    - $DEPENDENCY_JOB="build:windows_python"
-    - !reference [.download_dependency_windows, script]
-
-    - python -m pip install virtualenv
-    - virtualenv venv
-    - venv\Scripts\Activate.ps1
-    - python -m pip install -r requirements.txt
-    - python -m pip install .
-  artifacts:
-    expire_in: 1 week
-    paths:
-      - venv/
+# build:windows_cpp:
+#   stage: build
+#   needs: []
+#   tags:
+#     - windows
+
+#   image: buildtools
+#   before_script:
+#     # Install Chocolatey
+#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+#     # Install dependencies
+#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+#     - choco install git -Y
+#     - choco install python -Y
+#     # Update PATH
+#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+#   script:
+#     # Download dependencies
+#     # aidge_core
+#     - $DEPENDENCY_NAME="aidge_core"
+#     - $DEPENDENCY_JOB="build:windows_cpp"
+#     - !reference [.download_dependency_windows, script]
+#     - Remove-Item .\build_cpp\ -Recurse -Force -ErrorAction Ignore
+
+#     - $env:CMAKE_PREFIX_PATH = '../install_cpp'
+#     - mkdir -p build_cpp
+#     - cd build_cpp
+#     - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
+#     - cmake --build . -j2
+#     - cmake --install . --config Debug
+
+#   artifacts:
+#     expire_in: 1 week
+#     paths:
+#       - build_cpp/
+#       - install_cpp/
+
+# build:windows_python:
+#   stage: build
+#   needs: []
+#   tags:
+#     - windows
+
+#   image: buildtools
+#   before_script:
+#     # Install Chocolatey
+#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+#     # Install dependencies
+#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+#     - choco install git -Y
+#     - choco install python -Y
+#     # Update PATH
+#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+#   script:
+#     # Download dependencies
+#     # aidge_core (Python)
+#     - $DEPENDENCY_NAME="aidge_core"
+#     - $DEPENDENCY_JOB="build:windows_python"
+#     - !reference [.download_dependency_windows, script]
+
+#     - python -m pip install virtualenv
+#     - virtualenv venv
+#     - venv\Scripts\Activate.ps1
+#     - python -m pip install -r requirements.txt
+#     - python -m pip install .
+#   artifacts:
+#     expire_in: 1 week
+#     paths:
+#       - venv/
diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
index 3cada635eb25b3eb87e8318eb6e26723f7a27dd6..d0c94c2a3bcbb2908863b15b2b52ef068a55ff94 100644
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ b/.gitlab/ci/test.gitlab-ci.yml
@@ -26,23 +26,23 @@ test:ubuntu_python:
     reports:
       junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
 
-test:windows_cpp:
-  stage: test
-  needs: ["build:windows_cpp"]
-  tags:
-    - windows
-  image: buildtools
-  before_script:
-    # Install Chocolatey
-    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-    # Install dependencies
-    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-    - choco install python -Y
-    # Update PATH
-    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  script:
-    - cd build_cpp
-    - ctest --output-junit ctest-results.xml --output-on-failure
-  artifacts:
-    reports:
-      junit: build_cpp/ctest-results.xml
+# test:windows_cpp:
+#   stage: test
+#   needs: ["build:windows_cpp"]
+#   tags:
+#     - windows
+#   image: buildtools
+#   before_script:
+#     # Install Chocolatey
+#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+#     # Install dependencies
+#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+#     - choco install python -Y
+#     # Update PATH
+#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+#   script:
+#     - cd build_cpp
+#     - ctest --output-junit ctest-results.xml --output-on-failure
+#   artifacts:
+#     reports:
+#       junit: build_cpp/ctest-results.xml
diff --git a/aidge_backend_cpu/unit_tests/test_recipies.py b/aidge_backend_cpu/unit_tests/test_recipes.py
similarity index 90%
rename from aidge_backend_cpu/unit_tests/test_recipies.py
rename to aidge_backend_cpu/unit_tests/test_recipes.py
index e343fad1aeda82555a57778a394a4590b1e8772e..5586ab246e61d04b5754421b90ef3cd30629c1c3 100644
--- a/aidge_backend_cpu/unit_tests/test_recipies.py
+++ b/aidge_backend_cpu/unit_tests/test_recipes.py
@@ -15,7 +15,7 @@ import aidge_backend_cpu
 from functools import reduce
 import numpy as np
 
-class test_recipies(unittest.TestCase):
+class test_recipes(unittest.TestCase):
     def setUp(self):
         pass
 
@@ -33,12 +33,9 @@ class test_recipies(unittest.TestCase):
         conv = aidge_core.Conv2D(1, 1, [3, 3], name="Conv0")
         bn = aidge_core.BatchNorm2D(1, name="Add0")
 
-        graph_view = aidge_core.sequential([conv, bn])
+        graph_view = aidge_core.sequential([input_node, conv, bn])
 
         # Add random values to conv and BatchNorm parameters
-        input_node.add_child(graph_view)
-        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
-        input_node.get_operator().set_backend("cpu")
         graph_view.set_datatype(aidge_core.DataType.Float32)
         graph_view.set_backend("cpu")
 
diff --git a/aidge_backend_cpu/unit_tests/test_scheduler.py b/aidge_backend_cpu/unit_tests/test_scheduler.py
index 2f174efed32fc814010ff61cd42c1bae1105674e..0c41d59963c7633151745f2efe1f1fac3ee07815 100644
--- a/aidge_backend_cpu/unit_tests/test_scheduler.py
+++ b/aidge_backend_cpu/unit_tests/test_scheduler.py
@@ -40,18 +40,14 @@ class test_scheduler(unittest.TestCase):
         input_data =  np.array([0]).astype(np.float32)
         input_tensor = aidge_core.Tensor(input_data)
 
-        input_node = aidge_core.Producer(input_tensor, "X")
-
         graph_view = aidge_core.sequential([
+            aidge_core.Producer(input_tensor, "X"),
             aidge_core.FC(1, 50, name='0'),
             aidge_core.FC(50, 50, name='1'),
             aidge_core.FC(50, 10, name='2'),
         ])
         EXPECTED_SCHEDULE = ['0', '1', '2']
 
-        input_node.add_child(graph_view)
-        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
-        input_node.get_operator().set_backend("cpu")
         graph_view.set_datatype(aidge_core.DataType.Float32)
         graph_view.set_backend("cpu")
 
@@ -60,15 +56,17 @@ class test_scheduler(unittest.TestCase):
         scheduler = aidge_core.SequentialScheduler(graph_view)
         scheduler.generate_scheduling()
 
-        self.assertListEqual([i.name() for i in scheduler.get_static_scheduling()], EXPECTED_SCHEDULE)
+        self.assertEqual(len(scheduler.get_static_scheduling()), 10)
+        # The execution order of the producers does not matter
+        self.assertListEqual([i.name() for i in scheduler.get_static_scheduling()[-3:]], EXPECTED_SCHEDULE)
 
 
     def test_parallel_scheduling(self):
         input_data =  np.array([0]).astype(np.float32)
         input_tensor = aidge_core.Tensor(input_data)
 
-        input_node = aidge_core.Producer(input_tensor, "X")
         graph_view = aidge_core.sequential([
+            aidge_core.Producer(input_tensor, "X"),
             aidge_core.FC(1, 50, name='0'),
             aidge_core.parallel([aidge_core.FC(50, 50, name='1'), aidge_core.FC(50, 50, name='3')]),
             aidge_core.Add(2, name='2'),
@@ -76,9 +74,6 @@ class test_scheduler(unittest.TestCase):
 
         EXPECTED_SCHEDULE = [['0', '1', '3', '2'],  ['0', '3', '1', '2']] # Both scheduling are valid !
 
-        input_node.add_child(graph_view)
-        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
-        input_node.get_operator().set_backend("cpu")
         graph_view.set_datatype(aidge_core.DataType.Float32)
         graph_view.set_backend("cpu")
 
@@ -87,7 +82,9 @@ class test_scheduler(unittest.TestCase):
         scheduler = aidge_core.SequentialScheduler(graph_view)
         scheduler.generate_scheduling()
 
-        self.assertTrue([i.name() for i in scheduler.get_static_scheduling()] in EXPECTED_SCHEDULE)
+        self.assertEqual(len(scheduler.get_static_scheduling()), 11)
+        # The execution order of the producers does not matter
+        self.assertTrue([i.name() for i in scheduler.get_static_scheduling()[-4:]] in EXPECTED_SCHEDULE)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/aidge_backend_cpu/unit_tests/test_tensor.py b/aidge_backend_cpu/unit_tests/test_tensor.py
deleted file mode 100644
index 37531b43cf7755dfb760e575450b70bfa9a6ff68..0000000000000000000000000000000000000000
--- a/aidge_backend_cpu/unit_tests/test_tensor.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import unittest
-import aidge_core
-import aidge_backend_cpu
-import numpy as np
-
-
-class test_tensor(unittest.TestCase):
-    """Test tensor binding
-    """
-    def setUp(self):
-        pass
-    def tearDown(self):
-        pass
-
-    def test_getavailable_backends(self):
-        self.assertTrue("cpu" in aidge_core.Tensor.get_available_backends())
-
-    def test_numpy_int_to_tensor(self):
-        np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
-        # Numpy -> Tensor
-        t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
-        for i_t, i_n in zip(t, np_array.flatten()):
-            self.assertTrue(i_t == i_n)
-        for i,j in zip(t.dims(), np_array.shape):
-            self.assertEqual(i,j)
-    def test_tensor_int_to_numpy(self):
-        np_array = np.arange(9).reshape(1,1,3,3)
-        # Numpy -> Tensor
-        t = aidge_core.Tensor(np_array)
-        # Tensor -> Numpy
-        nnarray = np.array(t)
-        for i_nn, i_n in zip(nnarray.flatten(), np_array.flatten()):
-            self.assertTrue(i_nn == i_n)
-        for i,j in zip(t.dims(), nnarray.shape):
-            self.assertEqual(i,j)
-
-    def test_numpy_int64_to_tensor(self):
-        np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
-        # Numpy -> Tensor
-        t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
-        for i_t, i_n in zip(t, np_array.flatten()):
-            self.assertTrue(i_t == i_n)
-        for i,j in zip(t.dims(), np_array.shape):
-            self.assertEqual(i,j)
-
-    def test_numpy_float_to_tensor(self):
-        t = aidge_core.Tensor()
-        np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
-        # Numpy -> Tensor
-        t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
-        for i_t, i_n in zip(t, np_array.flatten()):
-            self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
-        for i,j in zip(t.dims(), np_array.shape):
-            self.assertEqual(i,j)
-
-    def test_get_set(self):
-        dims = [2,2,2]
-
-        np_array = np.arange(8).reshape(dims).astype(np.int32)
-        # Numpy -> Tensor
-        t = aidge_core.Tensor(np_array)
-        for i in range(8):
-            self.assertEqual(t[i], i)
-            t[i] = 5
-            self.assertEqual(t[i], 5)
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 2020c9dbcd1b0ed690e499bca44bbb70c49f7e45..78a317281475bd05ee317127b02cfeddcfd07e49 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -12,7 +12,6 @@
 #ifndef AIDGE_CPU_IMPORTS_H_
 #define AIDGE_CPU_IMPORTS_H_
 
-#include "aidge/backend/cpu/data/TensorImpl.hpp"
 #include "aidge/backend/cpu/operator/AddImpl.hpp"
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
@@ -26,18 +25,24 @@
 #include "aidge/backend/cpu/operator/GatherImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
+#include "aidge/backend/cpu/operator/MemorizeImpl.hpp"
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
+#include "aidge/backend/cpu/operator/PopImpl.hpp"
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
-#include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 #include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
+#include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
 #include "aidge/backend/cpu/operator/SliceImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
+#include "aidge/backend/cpu/operator/TanhImpl.hpp"
 #include "aidge/backend/cpu/operator/TransposeImpl.hpp"
 
-#endif /* AIDGE_CPU_IMPORTS_H_ */
\ No newline at end of file
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+
+#endif /* AIDGE_CPU_IMPORTS_H_ */
diff --git a/include/aidge/backend/cpu/data/Broadcasting.hpp b/include/aidge/backend/cpu/data/Broadcasting.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cb969cb54806a204072763a1672ee5266fb6347e
--- /dev/null
+++ b/include/aidge/backend/cpu/data/Broadcasting.hpp
@@ -0,0 +1,49 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_DATA_BROADCASTING_H_
+#define AIDGE_CPU_DATA_BROADCASTING_H_
+
+#include <vector>
+
+namespace Aidge {
+
+
+    /**
+     * @brief Broadcast an input dims vector to the same size as an outputDims vector
+     * @details Missing leading dimensions are filled with 1
+     * @param outputDims The vector of dimensions to follow
+     * @param dimsToBroadcast The vector of dimensions to broadcast
+     * @return std::vector<std::size_t> The broadcasted vector, padded with 1 on the missing dimensions.
+     */
+    std::vector<std::size_t> getBroadcastedDims(const std::vector<std::size_t>& outputDims, const std::vector<std::size_t>& dimsToBroadcast);
+
+    /**
+     * @brief Get a vector of indices along the dimensions vector from a flattened index
+     * @param dimensions The vector of dimensions we want the indices on
+     * @param idx The flattened index
+     * @return std::vector<std::size_t> The vector of indices along the dimensions.
+     */
+    std::vector<std::size_t> getMultiDimIndices(const std::vector<std::size_t>& dimensions, std::size_t idx);
+
+    /**
+     * @brief Get a flattened index into the dimensions vector from a given vector of indices on a broadcasted vector
+     * @param dimensions The vector of dimensions we want the flattened index on
+     * @param indices The vector of indices we want to flatten
+     * @return std::size_t The flattened index on the dimensions vector
+     */
+    std::size_t getFlattenedIndex(const std::vector<std::size_t>& dimensions, const std::vector<std::size_t>& indices);
+
+} // namespace Aidge
+
+#endif // AIDGE_CPU_DATA_BROADCASTING_H_
\ No newline at end of file
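The header above only declares the broadcasting helpers; their definitions are not part of this hunk. As a minimal sketch of the index arithmetic the Doxygen comments describe (an illustration assuming dims have first been padded to the output rank with getBroadcastedDims, not the actual implementation):

#include <cstddef>
#include <vector>

// Sketch: split a flat row-major index into one index per dimension.
std::vector<std::size_t> getMultiDimIndices(const std::vector<std::size_t>& dimensions,
                                            std::size_t idx) {
    std::vector<std::size_t> indices(dimensions.size(), 0);
    for (std::size_t i = dimensions.size(); i-- > 0;) {
        indices[i] = idx % dimensions[i];  // fastest-varying dimension last
        idx /= dimensions[i];
    }
    return indices;
}

// Sketch: flatten indices back into an offset; a size-1 (broadcast) dimension
// contributes index 0 regardless of the requested index.
std::size_t getFlattenedIndex(const std::vector<std::size_t>& dimensions,
                              const std::vector<std::size_t>& indices) {
    std::size_t flat = 0;
    for (std::size_t i = 0; i < dimensions.size(); ++i) {
        flat = flat * dimensions[i] + ((dimensions[i] == 1) ? 0 : indices[i]);
    }
    return flat;
}

For example, reading element (1, 2, 2) of a {2, 4, 3} output from an input of dims {2, 1, 3} collapses the middle index to 0 and yields the flat offset (1 * 1 + 0) * 3 + 2 = 5.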
diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
deleted file mode 100644
index 47e3b07e8fa08cdcd714745a9a49bb03e30f79f5..0000000000000000000000000000000000000000
--- a/include/aidge/backend/cpu/data/GetCPUPtr.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
-#define AIDGE_CPU_DATA_GETCPUPTR_H_
-
-#include "aidge/data/Tensor.hpp"
-
-namespace Aidge {
-inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
-  const auto tensor = std::static_pointer_cast<Tensor>(data);
-  return tensor->getImpl()->hostPtr(tensor->getImplOffset());
-}
-} // namespace Aidge
-
-#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
deleted file mode 100644
index 46dfae3d53b4b201507290bd538ea13737919c3e..0000000000000000000000000000000000000000
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ /dev/null
@@ -1,193 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_
-#define AIDGE_CPU_DATA_TENSORIMPL_H_
-
-#include "aidge/backend/TensorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/data/half.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"
-#include "aidge/utils/ErrorHandling.hpp"
-#include "aidge/utils/future_std/span.hpp"
-
-namespace Aidge {
-
-template <class T>
-class TensorImpl_cpu : public TensorImpl {
-private:
-    /// Pointer to the data and its capacity
-    future_std::span<T> mData;
-    /// If this instance own the data, std::unique_ptr manages it
-    std::unique_ptr<T[]> mDataOwner;
-
-public:
-    static constexpr const char *Backend = "cpu";
-
-    TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
-
-    bool operator==(const TensorImpl &otherImpl) const override final {
-        const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
-        AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
-
-        std::size_t i = 0;
-        for (; i < mNbElts &&
-               *(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
-               ++i) {
-        }
-        return i == mNbElts;
-    }
-
-    static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
-        return std::make_shared<TensorImpl_cpu<T>>(device, length);
-    }
-
-    inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
-
-    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
-        const T* srcT = static_cast<const T *>(src);
-        T* dstT = static_cast<T *>(rawPtr(offset));
-
-        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
-        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
-        std::copy(srcT, srcT + length, dstT);
-    }
-
-    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
-        if (length == 0) {
-            return;
-        }
-
-        T* dstT = static_cast<T *>(rawPtr(offset));
-        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
-        switch (srcDt)
-        {
-            case DataType::Float64:
-                std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Float32:
-                std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Float16:
-                std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Int64:
-                std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::UInt64:
-                std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Int32:
-                std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::UInt32:
-                std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Int16:
-                std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::UInt16:
-                std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::Int8:
-                std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
-                        dstT);
-                break;
-            case DataType::UInt8:
-                std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
-                        dstT);
-                break;
-            default:
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
-                break;
-        }
-    }
-
-    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
-        AIDGE_ASSERT(device.first == Backend, "backend must match");
-        AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
-        copy(src, length, offset);
-    }
-
-    inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
-        copy(src, length, offset);
-    }
-
-    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
-        const T* src = static_cast<const T*>(rawPtr(offset));
-        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
-        std::copy(src, src + length, static_cast<T *>(dst));
-    }
-
-    void *rawPtr(NbElts_t offset = 0) override final {
-        lazyInit();
-        return (mData.data() + offset);
-    };
-
-    const void *rawPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
-        return (mData.data() + offset);
-    };
-
-    void *hostPtr(NbElts_t offset = 0) override final {
-        lazyInit();
-        return (mData.data() + offset);
-    };
-
-    const void *hostPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
-        return (mData.data() + offset);
-    };
-
-    void setRawPtr(void *ptr, NbElts_t length) override final {
-        AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
-        mData = future_std::span<T>(static_cast<T *>(ptr), length);
-        mDataOwner.reset();
-    };
-
-    virtual ~TensorImpl_cpu() = default;
-
-private:
-    void lazyInit() {
-        if (mData.size() < mNbElts) {
-            // Need more data, a re-allocation will occur
-            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
-            mDataOwner.reset(new T[mNbElts]);
-            mData = future_std::span<T>(mDataOwner.get(), mNbElts);
-        }
-    }
-};
-
-namespace {
-static Registrar<Tensor> registrarTensorImpl_cpu_Float64(
-        {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
-        {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
-        {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
-        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
-        {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 0299148d086ae6e2be967232e8157c6a6229b0f7..1d3b29d43678e8d97e05b9b169a98f7e757838d8 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -12,28 +12,29 @@
 #ifndef AIDGE_CPU_OPERATOR_ADDIMPL_H_
 #define AIDGE_CPU_OPERATOR_ADDIMPL_H_
 
+#include <cstddef>  // std::size_t
+#include <memory>   // std::unique_ptr, std::make_unique
+#include <string>
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
-// class Add_Op<2>;
 
 // compute kernel registry for forward and backward
 class AddImplForward_cpu
-    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {};
+    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {};
 
 class AddImplBackward_cpu
-    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {};
+    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {};
 
 
 class AddImpl_cpu : public OperatorImpl {
 public:
-    AddImpl_cpu(const Add_Op& op) : OperatorImpl(op) {}
+    AddImpl_cpu(const Add_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<AddImpl_cpu> create(const Add_Op& op) {
         return std::make_unique<AddImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
index 631ad44a562c17d41ad019a1da112dbf8a69185c..478a0226f43ccbc64d567a56ab89a558179438c5 100644
--- a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
@@ -14,12 +14,13 @@
 
 #include "aidge/utils/Registrar.hpp"
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/AddImpl.hpp"
 
 namespace Aidge {
 
 template <class I, class O>
-void AddImpl_cpu_forward_kernel(const std::size_t inputLength, const std::vector<const void*> inputs_, void* output_) {
+void AddImpl_cpu_forward_kernel(const std::vector<const void*> inputs_, const std::vector<std::vector<std::size_t>>& inputDims, const std::size_t outputLength, const std::vector<std::size_t>& outDims, void* output_) {
     // FIXME: missing Add attributes as arguments
     std::vector<const I*> inputs;
     for (const auto& input_ : inputs_) {
@@ -27,12 +28,15 @@ void AddImpl_cpu_forward_kernel(const std::size_t inputLength, const std::vector
     }
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
+    for (std::size_t oIndex = 0; oIndex < outputLength; ++oIndex) {
         output[oIndex] = 0;
-        for (std::size_t iIndex = 0; iIndex < inputs.size(); ++iIndex) {
-            output[oIndex] += inputs[iIndex][oIndex];
-        }
-    }
+        std::vector<std::size_t> indexes = getMultiDimIndices(outDims, oIndex);
+        for (std::size_t iIndex = 0; iIndex < inputs.size(); ++iIndex) {
+            std::size_t idx = getFlattenedIndex(inputDims[iIndex], indexes);
+            output[oIndex] += inputs[iIndex][idx];
+        }
+    }
 }
 
 namespace {
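The reworked kernel resolves one flattened index per operand for every output element, so inputs of different shapes can be summed once their dims are padded to the output rank. A hypothetical standalone check of the new kernel signature (values and dims chosen purely for illustration):

#include <cassert>
#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"

int main() {
    const float a[2] = {1.f, 2.f};          // dims {2, 1}
    const float b[3] = {10.f, 20.f, 30.f};  // dims {1, 3}
    float out[6] = {};                      // dims {2, 3}

    Aidge::AddImpl_cpu_forward_kernel<float, float>(
        {a, b},            // raw input pointers
        {{2, 1}, {1, 3}},  // input dims, already padded to the output rank
        6,                 // output length
        {2, 3},            // output dims
        out);

    assert(out[0] == 11.f);  // 1 + 10
    assert(out[5] == 32.f);  // 2 + 30
    return 0;
}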
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index bfb2b1947281fc30e38fd1fe1663bd5de415d3ee..1b62de7e145dfab02e78319600c1b30b29fd715b 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -38,7 +38,7 @@ class AvgPoolingImpl2DBackward_cpu
 
 class AvgPoolingImpl2D_cpu : public OperatorImpl {
 public:
-    AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op) {}
+    AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<AvgPoolingImpl2D_cpu> create(const AvgPooling_Op<2> &op) {
         return std::make_unique<AvgPoolingImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index a599aeb7b427161eb7541829242820c0306d0d31..3743c40a706156c45e6b1e7bf5dfdd50f40ed195 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -53,7 +53,7 @@ class BatchNormImpl2DBackward_cpu
 
 class BatchNormImpl2D_cpu : public OperatorImpl {
 public:
-    BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op) {}
+    BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<BatchNormImpl2D_cpu> create(const BatchNorm_Op<2> &op) {
         return std::make_unique<BatchNormImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ConcatImpl.hpp b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
index d0d3e06365c524da1af485583dda6d6208ef3fb9..559d5026d3b7430489ffb1cf08ef143df013c4c4 100644
--- a/include/aidge/backend/cpu/operator/ConcatImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
@@ -41,7 +41,7 @@ class ConcatImplBackward_cpu
 
 class ConcatImpl_cpu : public OperatorImpl {
 public:
-    ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op) {}
+    ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ConcatImpl_cpu> create(const Concat_Op& op) {
         return std::make_unique<ConcatImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index f72890d8903ca4a9876809759587ed4b1ac22e67..470e189d3a9a8ce52dd067794cfd1bf6a7404696 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -40,7 +40,7 @@ class ConvDepthWiseImpl2DBackward_cpu
 
 class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
 public:
-    ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op) {}
+    ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ConvDepthWiseImpl2D_cpu> create(const ConvDepthWise_Op<2> &op) {
         return std::make_unique<ConvDepthWiseImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index 9bc2f27412f388a7fd03db06ac97c612044fab5f..5e739b06118e788f716f6e5d6a41a58cab9b5203 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -40,7 +40,7 @@ class ConvImpl2DBackward_cpu
 
 class ConvImpl2D_cpu : public OperatorImpl {
    public:
-    ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op) {}
+    ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ConvImpl2D_cpu> create(const Conv_Op<2> &op) {
         return std::make_unique<ConvImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 73809ee81e26fff23e40763405857ddd2c95db0c..06a1ae49ffacf3fbf0ae923081d8d9cf1a5a40d6 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -12,35 +12,37 @@
 #ifndef AIDGE_CPU_OPERATOR_DIVIMPL_H_
 #define AIDGE_CPU_OPERATOR_DIVIMPL_H_
 
+#include <memory>
+#include <tuple>
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Div.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
-// class Div_Op;
 
 // compute kernel registry for forward and backward
 class DivImplForward_cpu
-    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    // : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
+    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const std::size_t, const void*, const void*,void*)> {
 };
 class DivImplBackward_cpu
-    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 
 class DivImpl_cpu : public OperatorImpl {
 public:
-    DivImpl_cpu(const Div_Op& op) : OperatorImpl(op) {}
+    DivImpl_cpu(const Div_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<DivImpl_cpu> create(const Div_Op& op) {
         return std::make_unique<DivImpl_cpu>(op);
     }
 
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+
+    void forward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
index e2ead9ca8de3ed8328b659906336766fbfbb6a47..3cdcefa9e1c865f66b64ed527605d46af31be8af 100644
--- a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
@@ -12,42 +12,64 @@
 #ifndef AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
 #define AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
 
+#include <numeric>     // std::accumulate
+#include <cstddef>     // std::size_t
+#include <functional>  // std::multiplies
+
 #include "aidge/utils/Registrar.hpp"
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
 
 namespace Aidge {
+// template <class I1, class I2, class O>
+// void DivImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
+//                                 const std::vector<std::size_t>& input2Dims,
+//                                 const std::vector<std::size_t>& outputDims,
+//                                 const void* input1_,
+//                                 const void* input2_,
+//                                 void* output_) {
+
+//     const I1* input_1 = static_cast<const I1*>(input1_);
+//     const I2* input_2 = static_cast<const I2*>(input2_);
+//     O* output = static_cast<O*>(output_);
+
+//     const std::size_t totalElements = std::accumulate(outputDims.cbegin(), outputDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+
+// 	for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex)
+// 	{
+// 		std::vector<std::size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+
+// 		std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
+// 		std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+
+//         // TODO assert if input_2 is bad?
+//         output[oIndex] = input_1[idx1] / input_2[idx2];
+//     }
+// }
+
 template <class I1, class I2, class O>
-void DivImpl_cpu_forward_kernel(std::size_t input1Length,
-                                     std::size_t input2Length,
-                                     const void* input1_,
-                                     const void* input2_,
-                                     void* output_) {
+constexpr void DivImpl_cpu_forward_kernel(const std::size_t input1size_,
+                                const std::size_t input2size_,
+                                const std::size_t output1size_,
+                                const void* input1_,
+                                const void* input2_,
+                                void* output_) {
 
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
-    if (input2Length == input1Length)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] / input_2[i];
-        }
-    }
-    else if (input2Length == 1)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] / input_2[0];
-        }
-    }
-    else // input_2 is 1d and of size the number of channels of input_1
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            std::size_t channelIdx = i % input2Length;
-            output[i] = input_1[i] / input_2[channelIdx];
-        }
+
+    // Assume values are contiguous in memory
+    for (std::size_t i = 0; i < output1size_; ++i) {
+        const std::size_t in1_id = (input1size_ != 1) ? i : 0;
+        const std::size_t in2_id = (input2size_ != 1) ? i : 0;
+        output[i] = static_cast<O>(input_1[in1_id] / input_2[in2_id]);
     }
 }
 
+
 namespace {
 static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float32(
         {DataType::Float32, DataType::Float32, DataType::Float32},
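The active Div kernel deliberately keeps only the flat case: operands are assumed contiguous, and a size-1 operand is reused as a scalar; the commented-out variant above sketches the index-based broadcasting that is left for the caller or a later revision. A hypothetical use under that assumption:

#include "aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp"

int main() {
    const float num[4]   = {2.f, 4.f, 6.f, 8.f};
    const float denom[1] = {2.f};  // size-1 operand acts as a scalar
    float out[4] = {};

    Aidge::DivImpl_cpu_forward_kernel<float, float, float>(
        4,  // input 1 size
        1,  // input 2 size
        4,  // output size
        num, denom, out);
    // out is now {1, 2, 3, 4}
    return 0;
}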
diff --git a/include/aidge/backend/cpu/operator/ErfImpl.hpp b/include/aidge/backend/cpu/operator/ErfImpl.hpp
index 5c0a6fd49f4e2d435eed8e8baa979f59dbd84e68..1402868ea5b8cb441c12dbefaad17304fdfdc749 100644
--- a/include/aidge/backend/cpu/operator/ErfImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ErfImpl.hpp
@@ -32,7 +32,7 @@ class ErfImplBackward_cpu
 
 class ErfImpl_cpu : public OperatorImpl {
 public:
-    ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op) {}
+    ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ErfImpl_cpu> create(const Erf_Op& op) {
         return std::make_unique<ErfImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index 86bb7fd1271e5857b595dda8efc0354851c94b7e..514cb999b3ecb3c935a773a3a356c7e8063ce3ad 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -36,7 +36,7 @@ class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
 
 class FCImpl_cpu : public OperatorImpl {
 public:
-    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op) {}
+    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) {
         return std::make_unique<FCImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/GatherImpl.hpp b/include/aidge/backend/cpu/operator/GatherImpl.hpp
index 1d235ff14ca01955c268a7b061e6ecb7b2bbbb2a..fce777d0ac4d53134aa65689b6ac2ec02b805d98 100644
--- a/include/aidge/backend/cpu/operator/GatherImpl.hpp
+++ b/include/aidge/backend/cpu/operator/GatherImpl.hpp
@@ -32,7 +32,7 @@ class GatherImplBackward_cpu
 
 class GatherImpl_cpu : public OperatorImpl {
 public:
-    GatherImpl_cpu(const Gather_Op& op) : OperatorImpl(op) {}
+    GatherImpl_cpu(const Gather_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<GatherImpl_cpu> create(const Gather_Op& op) {
         return std::make_unique<GatherImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index a9c87b4dc6299326192c9e620adc00c333597b9d..42116c52d829a8b4ba27311b3ab2d35fcea37e8b 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -33,7 +33,7 @@ class LeakyReLUImplBackward_cpu
 
 class LeakyReLUImpl_cpu : public OperatorImpl {
 public:
-    LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op) {}
+    LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<LeakyReLUImpl_cpu> create(const LeakyReLU_Op& op) {
         return std::make_unique<LeakyReLUImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index e8654c6e9cc8fab9080bbb5ed57ea78ee0b7978c..e4b76d64baadbcb1baa7d24180c4bb13ed47215b 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -23,21 +23,19 @@
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
-// class MatMul_Op;
 
-// compute kernel registry for forward and backward
 class MatMulImplForward_cpu
-    : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType, DataType>,
-                         void(const MatMul_Op::Attrs &, const DimSize_t, const DimSize_t,
+    : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType>,
+                         void(const std::size_t, const std::size_t, const std::size_t,
                               const void *, const void *, void *)> {};
 class MatMulImplBackward_cpu
-    : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType, DataType>,
-                         void(const MatMul_Op::Attrs &, const DimSize_t, const DimSize_t,
+    : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType>,
+                         void(const std::vector<DimSize_t>&, const std::vector<DimSize_t>&,
                               const void *, const void *, void *)> {};
 
 class MatMulImpl_cpu : public OperatorImpl {
 public:
-    MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op) {}
+    MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op) {
         return std::make_unique<MatMulImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
index bc52779eff274379a853ea84fb839c9486652433..5045580fa599aac64f2c1414bfdf2b87ea57e313 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
@@ -12,45 +12,39 @@
 #ifndef AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_
 #define AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_
 
-#include "aidge/utils/Registrar.hpp"
-#include <algorithm>
-
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 
 namespace Aidge {
 
-template <class I, class W, class O>
-void MatMulImpl_cpu_forward_kernel(const MatMul_Op::Attrs& attrs, const DimSize_t batchSize, const DimSize_t oneInputSize,
-                                   const void* input_, const void* weights_, void* output_) {
+template <class I, class O>
+void MatMulImpl_cpu_forward_kernel(const std::size_t n, const std::size_t k, const std::size_t m,
+                                    const void* input1_, const void* input2_, void* output_) {
     // FIXME: missing MatMul parameters as arguments
-    const I* input = static_cast<const I*>(input_);
-    const W* weights = static_cast<const W*>(weights_);
+    const I* input1 = static_cast<const I*>(input1_);
+    const I* input2 = static_cast<const I*>(input2_);
     O* output = static_cast<O*>(output_);
 
-
-    std::fill(output, output+(batchSize*std::get<0>(attrs)), O(0));
-
-    for (std::size_t batch = 0; batch < batchSize; ++batch) {
-        for (std::size_t out = 0; out < std::get<0>(attrs); ++out) {
-            output[out + batch*std::get<0>(attrs)] = std::inner_product(input + batch*oneInputSize,
-                                                        input + (batch + 1)*oneInputSize,
-                                                        weights + out*oneInputSize,
-                                                        output[out + batch*std::get<0>(attrs)]);
+    for (std::size_t i = 0; i < n; ++i) {
+        for (std::size_t j = 0; j < m; ++j) {
+            O sum = O(0);
+            for (std::size_t l = 0; l < k; ++l) {
+                sum += static_cast<O>(input1[i*k + l] * input2[l*m + j]);
+            }
+            output[i*m + j] = sum;
         }
     }
 }
 
-
 namespace {
 static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::MatMulImpl_cpu_forward_kernel<float, float, float>);
+        {DataType::Float32, DataType::Float32},
+        Aidge::MatMulImpl_cpu_forward_kernel<float, float>);
 static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::MatMulImpl_cpu_forward_kernel<int, int, int>);
+        {DataType::Int32, DataType::Int32},
+        Aidge::MatMulImpl_cpu_forward_kernel<int, int>);
 static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::MatMulImpl_cpu_forward_kernel<double, double, double>);
+        {DataType::Float64, DataType::Float64},
+        Aidge::MatMulImpl_cpu_forward_kernel<double, double>);
 }  // namespace
 
 }  // namespace Aidge
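The kernel thus moves from an FC-style batch/weights computation driven by operator attributes to a plain (n x k) * (k x m) product over two operands of the same data type. A hypothetical numeric check:

#include <cassert>
#include "aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp"

int main() {
    const float a[6] = {1.f, 2.f, 3.f,
                        4.f, 5.f, 6.f};   // 2 x 3
    const float b[3] = {1.f, 0.f, -1.f};  // 3 x 1
    float out[2] = {};                    // 2 x 1

    Aidge::MatMulImpl_cpu_forward_kernel<float, float>(2, 3, 1, a, b, out);

    assert(out[0] == -2.f);  // 1*1 + 2*0 + 3*(-1)
    assert(out[1] == -2.f);  // 4*1 + 5*0 + 6*(-1)
    return 0;
}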
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
index 6cde34d9b123b4f83cbfce412ffa62e0144af8d4..15629b59b31f6f2228802861f6ae0d7d70b2bff9 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
@@ -38,7 +38,7 @@ class MaxPoolingImpl2DBackward_cpu
 
 class MaxPoolingImpl2D_cpu : public OperatorImpl {
 public:
-    MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op) {}
+    MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MaxPoolingImpl2D_cpu> create(const MaxPooling_Op<2> &op) {
         return std::make_unique<MaxPoolingImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MemorizeImpl.hpp b/include/aidge/backend/cpu/operator/MemorizeImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..10d18d780e1e450d1a2c58faa932e9d851a41f19
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/MemorizeImpl.hpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_MEMORIZEIMPL_H_
+#define AIDGE_CPU_OPERATOR_MEMORIZEIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+class MemorizeImpl_cpu : public OperatorImpl {
+public:
+    MemorizeImpl_cpu(const Memorize_Op& op) : OperatorImpl(op, "cpu") {}
+
+    static std::unique_ptr<MemorizeImpl_cpu> create(const Memorize_Op& op) {
+        return std::make_unique<MemorizeImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                               const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const override final;
+    void updateConsummerProducer() override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Memorize_Op> registrarMemorizeImpl_cpu("cpu", Aidge::MemorizeImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_MEMORIZEIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
index f1b58e59b9ac1d3a1d34162a1054534830b8d508..230094475088c6f7802f8a8af75986ded55e9137 100644
--- a/include/aidge/backend/cpu/operator/MulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -25,15 +25,15 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class MulImplForward_cpu
-    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
 };
 class MulImplBackward_cpu
-    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 
 class MulImpl_cpu : public OperatorImpl {
 public:
-    MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op) {}
+    MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MulImpl_cpu> create(const Mul_Op& op) {
         return std::make_unique<MulImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
index 9caef8b88af3ca779309b60eba984a72db35f84a..e1387768ea02e2a9f35790c64c7674c321a1faa7 100644
--- a/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
@@ -14,37 +14,35 @@
 
 #include "aidge/utils/Registrar.hpp"
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
 
 namespace Aidge {
 template <class I1, class I2, class O>
-void MulImpl_cpu_forward_kernel(std::size_t input1Length,
-                                     std::size_t input2Length,
-                                     const void* input1_,
-                                     const void* input2_,
-                                     void* output_) {
+void MulImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
+                                const std::vector<std::size_t>& input2Dims,
+                                const std::vector<std::size_t>& outputDims,
+                                const void* input1_,
+                                const void* input2_,
+                                void* output_) {
 
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
-    if (input2Length == input1Length)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] * input_2[i];
-        }
-    }
-    else if (input2Length == 1)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] * input_2[0];
-        }
+
+    std::size_t totalElements = 1;
+    for (std::size_t dimSize : outputDims) {
+        totalElements *= dimSize;
     }
-    else // input_2 is 1d and of size the number of channels of input_1
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            std::size_t channelIdx = i % input2Length;
-            output[i] = input_1[i] * input_2[channelIdx];
-        }
+
+    for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex) {
+        std::vector<std::size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+
+        std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
+        std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+
+        output[oIndex] = input_1[idx1] * input_2[idx2];
     }
 }
 
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index 2320662710f9802878811e51ec4439bd812aea67..a1efb0f699beb7a45cc104e7c6ab723c1952a5b1 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -40,7 +40,7 @@ class PadImpl2DBackward_cpu
 
 class PadImpl2D_cpu : public OperatorImpl {
 public:
-    PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op) {}
+    PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<PadImpl2D_cpu> create(const Pad_Op<2> &op) {
         return std::make_unique<PadImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/PopImpl.hpp b/include/aidge/backend/cpu/operator/PopImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..29272f5d759b5b39c6bfd704ab1e84b0777e33c5
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/PopImpl.hpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_POPIMPL_H_
+#define AIDGE_CPU_OPERATOR_POPIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Pop.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Pop_Op;
+
+// compute kernel registry for forward and backward
+class PopImplForward_cpu
+    : public Registrable<PopImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+class PopImplBackward_cpu
+    : public Registrable<PopImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+
+class PopImpl_cpu : public OperatorImpl {
+public:
+    PopImpl_cpu(const Pop_Op& op) : OperatorImpl(op, "cpu") {}
+
+    static std::unique_ptr<PopImpl_cpu> create(const Pop_Op& op) {
+        return std::make_unique<PopImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Pop_Op> registrarPopImpl_cpu("cpu", Aidge::PopImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_POPIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index d3cafa7e7380e31dd331950e381e08210c3f3a4c..3d63160aee4a112d599effd6c16d39f4a6da7f6d 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -25,15 +25,15 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class PowImplForward_cpu
-    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 class PowImplBackward_cpu
-    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 
 class PowImpl_cpu : public OperatorImpl {
 public:
-    PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op) {}
+    PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<PowImpl_cpu> create(const Pow_Op& op) {
         return std::make_unique<PowImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
index c9c5db7e9aef07d24ba8f80c94b8f2494865e004..1146cfa77464f8bd1c33a0ec0113415dcf599b53 100644
--- a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
@@ -15,39 +15,36 @@
 #include "aidge/utils/Registrar.hpp"
 #include <cmath>
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 
 namespace Aidge {
 template <class I1, class I2, class O>
-void PowImpl_cpu_forward_kernel(std::size_t input1Length,
-                                     std::size_t input2Length,
-                                     const void* input1_,
-                                     const void* input2_,
-                                     void* output_) {
+void PowImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
+                                const std::vector<std::size_t>& input2Dims,
+                                const std::vector<std::size_t>& outputDims,
+                                const void* input1_,
+                                const void* input2_,
+                                void* output_) {
 
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
 
-    if (input2Length == input1Length)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = std::pow(input_1[i], input_2[i]);
-        }
-    }
-    else if (input2Length == 1)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = std::pow(input_1[i], input_2[0]);
-        }
-    }
-    else // input_2 is 1d and of size the number of channels of input_1
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            std::size_t channelIdx = i % input2Length;
-            output[i] = std::pow(input_1[i], input_2[channelIdx]);
-        }
+    size_t totalElements = 1;
+    for (size_t dimSize : outputDims) {
+        totalElements *= dimSize;
     }
+
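+    // For each output element, recover its multi-dimensional coordinates,
+    // then map them to a flat index in each (possibly broadcast) input.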
+    for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex)
+    {
+        std::vector<size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+
+        std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
+        std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+
+        output[oIndex] = std::pow(input_1[idx1], input_2[idx2]);
+    }
 }
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/ProducerImpl.hpp b/include/aidge/backend/cpu/operator/ProducerImpl.hpp
deleted file mode 100644
index f1fc7a758e4163ef77755ea26f9e18043379c3a0..0000000000000000000000000000000000000000
--- a/include/aidge/backend/cpu/operator/ProducerImpl.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_
-#define AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_
-
-#include <memory>
-
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"
-
-namespace Aidge {
-class ProducerImpl_cpu : public OperatorImpl {
-public:
-    ProducerImpl_cpu(const Producer_Op &op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<ProducerImpl_cpu> create(const Producer_Op &op) {
-        return std::make_unique<ProducerImpl_cpu>(op);
-    }
-
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
-    inline void forward() noexcept override final {}
-
-    inline void backward() noexcept override final {}
-};
-
-namespace {
-static Registrar<Producer_Op> registrarProducerImpl_cpu("cpu", Aidge::ProducerImpl_cpu::create);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index 7aff29376a5a5b8e742fe89830e2bdb193e43f96..1c87fe6d80b3d571c55e4355d8b5ef703a2133e4 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -35,7 +35,7 @@ class ReLUImplBackward_cpu
 
 class ReLUImpl_cpu : public OperatorImpl {
 public:
-    ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op) {}
+    ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ReLUImpl_cpu> create(const ReLU_Op& op) {
         return std::make_unique<ReLUImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
index 955099a6fe76352e6ea692b99a2a2d1561a30a6d..aa533786d3ce5b6f5cd501b6ba74b1be2823d407 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
@@ -25,6 +25,7 @@ void ReLUImpl_cpu_forward_kernel(std::size_t inputLenght,
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
+//#pragma omp parallel for if (inputLenght > 1024)
     for (std::size_t i = 0; i < inputLenght; ++i) {
         output[i] = input[i] > 0 ? input[i] : 0;
     }
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
index 9b85eb812caffca3820a711d46775e1134db863f..e2b7288320e3e57495044381c34c5b1be1d3c243 100644
--- a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
@@ -25,55 +25,22 @@
 namespace Aidge {
 // class ReduceMean_Op;
 
-// compute kernel registry for forward and backward
-// DIM 1
-class ReduceMeanImpl1DForward_cpu
-    : public Registrable<ReduceMeanImpl1DForward_cpu,
+// compute kernel registry for forward and backward (any number of axes)
+class ReduceMeanImplForward_cpu
+    : public Registrable<ReduceMeanImplForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+                         void(const ReduceMean_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
 class ReduceMeanImpl1DBackward_cpu
     : public Registrable<ReduceMeanImpl1DBackward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+                         void(const ReduceMean_Op::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
 
-// DIM 2
-class ReduceMeanImpl2DForward_cpu
-    : public Registrable<ReduceMeanImpl2DForward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-class ReduceMeanImpl2DBackward_cpu
-    : public Registrable<ReduceMeanImpl2DBackward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
-// DIM 3
-class ReduceMeanImpl3DForward_cpu
-    : public Registrable<ReduceMeanImpl3DForward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-class ReduceMeanImpl3DBackward_cpu
-    : public Registrable<ReduceMeanImpl3DBackward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-
-class ReduceMeanImpl1D_cpu : public OperatorImpl {
-   public:
-    ReduceMeanImpl1D_cpu(const ReduceMean_Op<1>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<ReduceMeanImpl1D_cpu> create(const ReduceMean_Op<1> &op) {
-        return std::make_unique<ReduceMeanImpl1D_cpu>(op);
-    }
-
-   public:
-    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
-};
-
-class ReduceMeanImpl2D_cpu : public OperatorImpl {
+class ReduceMeanImpl_cpu : public OperatorImpl {
    public:
-    ReduceMeanImpl2D_cpu(const ReduceMean_Op<2>& op) : OperatorImpl(op) {}
+    ReduceMeanImpl_cpu(const ReduceMean_Op& op) : OperatorImpl(op, "cpu") {}
 
-    static std::unique_ptr<ReduceMeanImpl2D_cpu> create(const ReduceMean_Op<2> &op) {
-        return std::make_unique<ReduceMeanImpl2D_cpu>(op);
+    static std::unique_ptr<ReduceMeanImpl_cpu> create(const ReduceMean_Op &op) {
+        return std::make_unique<ReduceMeanImpl_cpu>(op);
     }
 
    public:
@@ -81,23 +48,80 @@ class ReduceMeanImpl2D_cpu : public OperatorImpl {
     void forward() override;
 };
 
-class ReduceMeanImpl3D_cpu : public OperatorImpl {
-   public:
-    ReduceMeanImpl3D_cpu(const ReduceMean_Op<3>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<ReduceMeanImpl3D_cpu> create(const ReduceMean_Op<3> &op) {
-        return std::make_unique<ReduceMeanImpl3D_cpu>(op);
-    }
-
-   public:
-    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
-};
+// // compute kernel registry for forward and backward
+// // DIM 1
+// class ReduceMeanImpl1DForward_cpu
+//     : public Registrable<ReduceMeanImpl1DForward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// class ReduceMeanImpl1DBackward_cpu
+//     : public Registrable<ReduceMeanImpl1DBackward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+
+// // DIM 2
+// class ReduceMeanImpl2DForward_cpu
+//     : public Registrable<ReduceMeanImpl2DForward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// class ReduceMeanImpl2DBackward_cpu
+//     : public Registrable<ReduceMeanImpl2DBackward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+// // DIM 3
+// class ReduceMeanImpl3DForward_cpu
+//     : public Registrable<ReduceMeanImpl3DForward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// class ReduceMeanImpl3DBackward_cpu
+//     : public Registrable<ReduceMeanImpl3DBackward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+
+// class ReduceMeanImpl1D_cpu : public OperatorImpl {
+//    public:
+//     ReduceMeanImpl1D_cpu(const ReduceMean_Op<1>& op) : OperatorImpl(op, "cpu") {}
+
+//     static std::unique_ptr<ReduceMeanImpl1D_cpu> create(const ReduceMean_Op<1> &op) {
+//         return std::make_unique<ReduceMeanImpl1D_cpu>(op);
+//     }
+
+//    public:
+//     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+//     void forward() override;
+// };
+
+// class ReduceMeanImpl2D_cpu : public OperatorImpl {
+//    public:
+//     ReduceMeanImpl2D_cpu(const ReduceMean_Op<2>& op) : OperatorImpl(op, "cpu") {}
+
+//     static std::unique_ptr<ReduceMeanImpl2D_cpu> create(const ReduceMean_Op<2> &op) {
+//         return std::make_unique<ReduceMeanImpl2D_cpu>(op);
+//     }
+
+//    public:
+//     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+//     void forward() override;
+// };
+
+// class ReduceMeanImpl3D_cpu : public OperatorImpl {
+//    public:
+//     ReduceMeanImpl3D_cpu(const ReduceMean_Op<3>& op) : OperatorImpl(op, "cpu") {}
+
+//     static std::unique_ptr<ReduceMeanImpl3D_cpu> create(const ReduceMean_Op<3> &op) {
+//         return std::make_unique<ReduceMeanImpl3D_cpu>(op);
+//     }
+
+//    public:
+//     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+//     void forward() override;
+// };
 namespace {
 // add cpu backend to ReduceMean_Op<2> implementation registry
-static Registrar<ReduceMean_Op<1>> registrarReduceMeanImpl1D_cpu("cpu", Aidge::ReduceMeanImpl1D_cpu::create);
-static Registrar<ReduceMean_Op<2>> registrarReduceMeanImpl2D_cpu("cpu", Aidge::ReduceMeanImpl2D_cpu::create);
-static Registrar<ReduceMean_Op<3>> registrarReduceMeanImpl3D_cpu("cpu", Aidge::ReduceMeanImpl3D_cpu::create);
+static Registrar<ReduceMean_Op> registrarReduceMeanImpl_cpu("cpu", Aidge::ReduceMeanImpl_cpu::create);
+// static Registrar<ReduceMean_Op<1>> registrarReduceMeanImpl1D_cpu("cpu", Aidge::ReduceMeanImpl1D_cpu::create);
+// static Registrar<ReduceMean_Op<2>> registrarReduceMeanImpl2D_cpu("cpu", Aidge::ReduceMeanImpl2D_cpu::create);
+// static Registrar<ReduceMean_Op<3>> registrarReduceMeanImpl3D_cpu("cpu", Aidge::ReduceMeanImpl3D_cpu::create);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
index 46eb61f2f03acd47d74725ade1425a92f028690c..d7a967e84f53924a4b050ed79d1220f9bc79232e 100644
--- a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
@@ -12,10 +12,12 @@
 #ifndef AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_
 #define AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_
 
-#include <cstddef>
-#include <algorithm>   // std::copy, std::for_each
-#include <numeric>     //std::accumulate
+#include <algorithm>   // std::for_each
+#include <cstddef>     // std::size_t
+#include <cstdint>     // std::int32_t
 #include <functional>  //std::multiplies
+#include <numeric>     //std::accumulate
+#include <vector>
 
 #include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
 #include "aidge/data/Data.hpp"
@@ -23,8 +25,8 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-template <class I, class O, DimSize_t DIM>
-void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs& attrs,
+template <class I, class O>
+void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op::Attrs& attrs,
                                      const std::vector<DimSize_t>& inputDims,
                                      const void* input_,
                                      void* output_) {
@@ -32,14 +34,15 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
+    const std::vector<std::int32_t>& axes = std::get<0>(attrs);
     const std::size_t nb_dims = inputDims.size();
     const std::size_t totalElements = std::accumulate(inputDims.cbegin(), inputDims.cend(), 1, std::multiplies<std::size_t>());
 
-    if (DIM == 1) {
-        const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>());
-        const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>());
+    if (axes.size() == 1) {
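+        // Single-axis fast path: view the tensor as
+        // (stride_pre, dim_i, stride_post) and average over dim_i.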
+        const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + axes[0], 1, std::multiplies<std::size_t>());
+        const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - axes[0], 1, std::multiplies<std::size_t>());
 
-        const std::size_t dim_i = inputDims[std::get<0>(attrs)[0]];
+        const std::size_t dim_i = inputDims[axes[0]];
         for (std::size_t pre = 0; pre < stride_pre; ++pre) {
             for (std::size_t post = 0; post < stride_post; ++post) {
                 const std::size_t idx_i = pre * dim_i * stride_post + post;
@@ -68,7 +71,7 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
         const I* inputAccumulation = input;
         I* outputAccumulation = nullptr;
 
-        for (const auto& axisInt : std::get<0>(attrs)) {
+        for (const auto& axisInt : axes) {
             const std::size_t a = static_cast<std::size_t>(axisInt);
             outputElements /= inputDims[a];
             outputAccumulation = new I[outputElements];
@@ -93,7 +96,7 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
         // Copy elements from inputAccumulation to output while dividing by divisor
         I divisor = totalElements / outputElements;
         std::transform(inputAccumulation, inputAccumulation + outputElements, output,
-                    [divisor](int element) { return element / divisor; });
+                    [divisor](I element) { return element / divisor; });
         if (outputAccumulation) {
             delete[] outputAccumulation;
         }
@@ -103,29 +106,36 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
 }
 
 namespace {
-// DIM = 1
-static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,1>);
-static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,1>);
-static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,1>);
-
-// DIM = 2
-static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,2>);
-static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,2>);
-static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,2>);
-
-// DIM = 3
-static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,3>);
-static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,3>);
-static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,3>);
+static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float>);
+static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int>);
+static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double>);
+
+// // DIM = 1
+// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float32(
+//         {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,1>);
+// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Int32(
+//         {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,1>);
+// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float64(
+//         {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,1>);
+
+// // DIM = 2
+// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float32(
+//         {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,2>);
+// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Int32(
+//         {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,2>);
+// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float64(
+//         {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,2>);
+
+// // DIM = 3
+// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float32(
+//         {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,3>);
+// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Int32(
+//         {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,3>);
+// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float64(
+//         {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,3>);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/ReshapeImpl.hpp b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
index d5754b34e952d52b2071744e9f8e863074ef9fa3..d2d819e8d56df59437904aa9b4ae91185c8288f2 100644
--- a/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
@@ -32,7 +32,7 @@ class ReshapeImplBackward_cpu
 
 class ReshapeImpl_cpu : public OperatorImpl {
 public:
-    ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op) {}
+    ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ReshapeImpl_cpu> create(const Reshape_Op& op) {
         return std::make_unique<ReshapeImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
index bbcb4553d7aa4b17d733e0f455373bebb9c3581c..088625e963b158811aad85665a25b68bf2892bb9 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -34,7 +34,7 @@ class ScalingImplBackward_cpu
 
 class ScalingImpl_cpu : public OperatorImpl {
 public:
-    ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op) {}
+    ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) {
         return std::make_unique<ScalingImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f54a6c84aa83414cbe8a7a1713f36dd3311dda3f
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SIGMOIDIMPL_H_
+#define AIDGE_CPU_OPERATOR_SIGMOIDIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Sigmoid.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Sigmoid_Op;
+
+// compute kernel registry for forward and backward
+class SigmoidImplForward_cpu
+    : public Registrable<SigmoidImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+class SigmoidImplBackward_cpu
+    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+
+class SigmoidImpl_cpu : public OperatorImpl {
+public:
+    SigmoidImpl_cpu(const Sigmoid_Op& op) : OperatorImpl(op, "cpu") {}
+
+    static std::unique_ptr<SigmoidImpl_cpu> create(const Sigmoid_Op& op) {
+        return std::make_unique<SigmoidImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Sigmoid_Op> registrarSigmoidImpl_cpu("cpu", Aidge::SigmoidImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SIGMOIDIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a53650942540e6368855ffe19e2f7f651ab5b6bc
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SIGMOIDIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SIGMOIDIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void SigmoidImpl_cpu_forward_kernel(std::size_t inputLenght,
+                                     const void* input_,
+                                     void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
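+    // Logistic activation computed element-wise: 1 / (1 + exp(-x)).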
+//#pragma omp parallel for if (inputLenght > 1024)
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        output[i] = static_cast<O>(1.0) / (static_cast<O>(1.0) + std::exp(-input[i]));
+    }
+}
+
+namespace {
+static Registrar<SigmoidImplForward_cpu> registrarSigmoidImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::SigmoidImpl_cpu_forward_kernel<float, float>);
+static Registrar<SigmoidImplForward_cpu> registrarSigmoidImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::SigmoidImpl_cpu_forward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SIGMOIDIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SliceImpl.hpp b/include/aidge/backend/cpu/operator/SliceImpl.hpp
index 1cba5906064c51a4f0da2f1f3682b0828a080d43..72d6105388924dc1553cbeba2124da66d804980f 100644
--- a/include/aidge/backend/cpu/operator/SliceImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SliceImpl.hpp
@@ -40,7 +40,7 @@ class SliceImplBackward_cpu
 
 class SliceImpl_cpu : public OperatorImpl {
 public:
-    SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op) {}
+    SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SliceImpl_cpu> create(const Slice_Op& op) {
         return std::make_unique<SliceImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index 005b52f646f9e9ddf14af09cc22d9e2a44ba6dd4..9eb5323702358650f3af91b46a8a1a0872b02675 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -33,7 +33,7 @@ class SoftmaxImplBackward_cpu
 
 class SoftmaxImpl_cpu : public OperatorImpl {
 public:
-    SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op) {}
+    SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SoftmaxImpl_cpu> create(const Softmax_Op& op) {
         return std::make_unique<SoftmaxImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl.hpp b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
index a2c9a03069c1ea6be482b599df98bba733a1bd59..33fa7b5bc802005112a2b47357312883706e43e9 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
@@ -34,7 +34,7 @@ class SqrtImplBackward_cpu
 
 class SqrtImpl_cpu : public OperatorImpl {
 public:
-    SqrtImpl_cpu(const Sqrt_Op& op) : OperatorImpl(op) {}
+    SqrtImpl_cpu(const Sqrt_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SqrtImpl_cpu> create(const Sqrt_Op& op) {
         return std::make_unique<SqrtImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp
index 2d4c22f0d7f5e850ce805e0c78fb3e64bfa8f42b..2d957aa67b3061994f7fb2bf9550e4d5338d3967 100644
--- a/include/aidge/backend/cpu/operator/SubImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl.hpp
@@ -25,15 +25,15 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SubImplForward_cpu
-    : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 class SubImplBackward_cpu
-    : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
 };
 
 class SubImpl_cpu : public OperatorImpl {
 public:
-    SubImpl_cpu(const Sub_Op& op) : OperatorImpl(op) {}
+    SubImpl_cpu(const Sub_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SubImpl_cpu> create(const Sub_Op& op) {
         return std::make_unique<SubImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp
index 08f2e24fa38d2739943279666187a55d7076a89b..19b0bd21de129ed303151987323234364ce5f6f2 100644
--- a/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp
@@ -14,39 +14,35 @@
 
 #include "aidge/utils/Registrar.hpp"
 
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 
 namespace Aidge {
 template <class I1, class I2, class O>
-void SubImpl_cpu_forward_kernel(std::size_t input1Length,
-                                     std::size_t input2Length,
-                                     const void* input1_,
-                                     const void* input2_,
-                                     void* output_) {
+void SubImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
+                                const std::vector<std::size_t>& input2Dims,
+                                const std::vector<std::size_t>& outputDims,
+                                const void* input1_,
+                                const void* input2_,
+                                void* output_) {
 
     const I1* input_1 = static_cast<const I1*>(input1_);
     const I2* input_2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
 
-    if (input2Length == input1Length)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] - input_2[i];
-        }
-    }
-    else if (input2Length == 1)
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            output[i] = input_1[i] - input_2[0];
-        }
-    }
-    else // input_2 is 1d and of size the number of channels of input_1
-    {
-        for (std::size_t i = 0; i < input1Length; ++i) {
-            std::size_t channelIdx = i % input2Length;
-            output[i] = input_1[i] - input_2[channelIdx];
-        }
+    size_t totalElements = 1;
+    for (size_t dimSize : outputDims) {
+        totalElements *= dimSize;
     }
+
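+    // For each output element, recover its multi-dimensional coordinates,
+    // then map them to a flat index in each (possibly broadcast) input.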
+    for (std::size_t oIndex = 0; oIndex < totalElements; ++oIndex)
+    {
+        std::vector<size_t> indexes = getMultiDimIndices(outputDims, oIndex);
+        std::size_t idx1 = getFlattenedIndex(input1Dims, indexes);
+        std::size_t idx2 = getFlattenedIndex(input2Dims, indexes);
+        output[oIndex] = input_1[idx1] - input_2[idx2];
+    }
 }
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/TanhImpl.hpp b/include/aidge/backend/cpu/operator/TanhImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4169b1a533a8b2382644246ea295a683e6f83f1d
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/TanhImpl.hpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_TANHIMPL_H_
+#define AIDGE_CPU_OPERATOR_TANHIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Tanh.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Tanh_Op;
+
+// compute kernel registry for forward and backward
+class TanhImplForward_cpu
+    : public Registrable<TanhImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+class TanhImplBackward_cpu
+    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+
+class TanhImpl_cpu : public OperatorImpl {
+public:
+    TanhImpl_cpu(const Tanh_Op& op) : OperatorImpl(op, "cpu") {}
+
+    static std::unique_ptr<TanhImpl_cpu> create(const Tanh_Op& op) {
+        return std::make_unique<TanhImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Tanh_Op> registrarTanhImpl_cpu("cpu", Aidge::TanhImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_TANHIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9e57b6dfcb0da322f5b21944fb10ec7a10cd0ab8
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_TANHIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_TANHIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/TanhImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void TanhImpl_cpu_forward_kernel(std::size_t inputLenght,
+                                     const void* input_,
+                                     void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+//#pragma omp parallel for if (inputLenght > 1024)
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        output[i] = std::tanh(input[i]);
+    }
+}
+
+namespace {
+static Registrar<TanhImplForward_cpu> registrarTanhImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::TanhImpl_cpu_forward_kernel<float, float>);
+static Registrar<TanhImplForward_cpu> registrarTanhImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::TanhImpl_cpu_forward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_TANHIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/TransposeImpl.hpp b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
index 712e672752648f5ff8a3c073f6c81bbe7cc85d9d..3c6913dd71d6642d8b76198a272d64bfaba833e8 100644
--- a/include/aidge/backend/cpu/operator/TransposeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
@@ -57,7 +57,7 @@ class TransposeImpl6DBackward_cpu
 
 class TransposeImpl2D_cpu : public OperatorImpl {
 public:
-    TransposeImpl2D_cpu(const Transpose_Op<2>& op) : OperatorImpl(op) {}
+    TransposeImpl2D_cpu(const Transpose_Op<2>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl2D_cpu> create(const Transpose_Op<2>& op) {
         return std::make_unique<TransposeImpl2D_cpu>(op);
@@ -68,7 +68,7 @@ public:
 };
 class TransposeImpl3D_cpu : public OperatorImpl {
 public:
-    TransposeImpl3D_cpu(const Transpose_Op<3>& op) : OperatorImpl(op) {}
+    TransposeImpl3D_cpu(const Transpose_Op<3>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl3D_cpu> create(const Transpose_Op<3>& op) {
         return std::make_unique<TransposeImpl3D_cpu>(op);
@@ -79,7 +79,7 @@ public:
 };
 class TransposeImpl4D_cpu : public OperatorImpl {
 public:
-    TransposeImpl4D_cpu(const Transpose_Op<4>& op) : OperatorImpl(op) {}
+    TransposeImpl4D_cpu(const Transpose_Op<4>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl4D_cpu> create(const Transpose_Op<4>& op) {
         return std::make_unique<TransposeImpl4D_cpu>(op);
@@ -90,7 +90,7 @@ public:
 };
 class TransposeImpl5D_cpu : public OperatorImpl {
 public:
-    TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op) {}
+    TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl5D_cpu> create(const Transpose_Op<5>& op) {
         return std::make_unique<TransposeImpl5D_cpu>(op);
@@ -101,7 +101,7 @@ public:
 };
 class TransposeImpl6D_cpu : public OperatorImpl {
 public:
-    TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op) {}
+    TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl6D_cpu> create(const Transpose_Op<6>& op) {
         return std::make_unique<TransposeImpl6D_cpu>(op);
diff --git a/src/data/Broadcasting.cpp b/src/data/Broadcasting.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..22977aa772e3f3f4810a59ff1fc024cc21c66bd1
--- /dev/null
+++ b/src/data/Broadcasting.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
+
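+// Right-align dimsToBroadcast with outputDims, padding with leading 1s:
+// e.g. outputDims = {2,3,4} and dimsToBroadcast = {3,4} give {1,3,4}.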
+std::vector<std::size_t> Aidge::getBroadcastedDims(const std::vector<std::size_t>& outputDims, const std::vector<std::size_t>& dimsToBroadcast){
+    std::vector<std::size_t> broadcastedDims(outputDims.size(), 1);
+    for (int j = dimsToBroadcast.size() - 1; j >= 0; --j) {
+        std::size_t idx = outputDims.size() - (dimsToBroadcast.size() - j);
+        broadcastedDims[idx] = dimsToBroadcast[j];
+    }
+    return broadcastedDims;
+}
+
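+// Convert a flat index into per-dimension coordinates:
+// e.g. dimensions = {2,3,4} and idx = 17 give {1,1,1}.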
+std::vector<std::size_t> Aidge::getMultiDimIndices(const std::vector<std::size_t>& dimensions, std::size_t idx){
+    std::vector<std::size_t> indices(dimensions.size(), 0);
+
+    for (int i = dimensions.size() - 1; i >= 0; --i) {
+        indices[i] = idx % dimensions[i];
+        idx /= dimensions[i];
+    }
+
+    return indices;
+}
+
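+// Convert per-dimension coordinates back to a flat index; dimensions of size 1
+// are pinned to coordinate 0, which implements the broadcast read:
+// e.g. dimensions = {1,3,4} and indices = {1,1,1} give 5.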
+std::size_t Aidge::getFlattenedIndex(const std::vector<std::size_t>& dimensions, const std::vector<std::size_t>& indices){
+    std::size_t flattenedIdx = 0;
+    std::size_t stride = 1;
+
+    for (int i = dimensions.size() - 1; i >= 0; --i) {
+        std::size_t idx = dimensions[i]>1 ? indices[i] : 0;
+        flattenedIdx += idx * stride;
+        stride *= dimensions[i];
+    }
+    return flattenedIdx;
+}
+
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 3b53eaf3b88fb418746ab5a7a2297a15606974d3..abd40bd6af06c52945815fd6245e661710fa1127 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -9,17 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/backend/cpu/operator/AddImpl.hpp"
+
 #include <cassert>
 #include <numeric> // std::accumulate
 #include <vector>
 
-#include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
-
-#include "aidge/backend/cpu/operator/AddImpl.hpp"
-#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::NbElts_t  Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -27,15 +28,18 @@ Aidge::NbElts_t  Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex
 }
 
 void  Aidge::AddImpl_cpu::forward() {
-    assert(mOp.getRawInput(0) && "missing input in Add operator");
-    DataType datatypeFirstInput = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType();
-    for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
-        assert(mOp.getRawInput(i) && "missing input in Add operator");
-        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dataType() == datatypeFirstInput);
+    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
+    assert(opTensor.getInput(0) && "missing input in Add operator");
+    AIDGE_ASSERT(opTensor.getInput(0)->hasImpl(), "cannot run Add forward because the 0-th input has no implementation.");
+    DataType datatypeFirstInput = opTensor.getInput(0)->dataType();
+    for (IOIndex_t i = 1; i < opTensor.nbInputs(); ++i) {
+        assert(opTensor.getInput(i) && "missing input in Add operator");
+        AIDGE_ASSERT(opTensor.getInput(i)->hasImpl(), "cannot run Add forward because the {}-th input has no implementation.", i);
+        assert(opTensor.getInput(i)->dataType() == datatypeFirstInput);
     }
 
     // Find the correct kernel type
-    const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType();
+    const auto outputDataType = opTensor.getOutput(0)->dataType();
     const Registrar<AddImplForward_cpu>::registrar_key registrarKey = {
         datatypeFirstInput,
         outputDataType};
@@ -55,15 +59,26 @@ void  Aidge::AddImpl_cpu::forward() {
     // TODO: right now, if needed, memory will be allocated/deallocated at each
     // call to forward(). We might put the following shared_ptr as members of
     // this class to avoid that.
+    const std::size_t nbDims = opTensor.getOutput(0)->nbDims();
+    std::vector<std::vector<std::size_t>> inputsDims;
     std::vector<const void*> opInputs;
-    std::vector<std::shared_ptr<Tensor>> inputsFallback(mOp.nbInputs());
-    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
-        const auto& input = std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->refCastFrom(inputsFallback[i], *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+    std::vector<std::shared_ptr<Tensor>> inputsFallback(opTensor.nbInputs());
+    for (IOIndex_t i = 0; i < opTensor.nbInputs(); ++i) {
+        std::vector<std::size_t> inputDims(nbDims, 1);
+        auto dims = opTensor.getInput(i)->dims();
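+        // Right-align this input's dimensions with the output's; missing
+        // leading dimensions behave as size 1 (broadcast).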
+        for (std::size_t j = dims.size() - 1; j + 1 > 0; --j) {
+            std::size_t idx = nbDims - (dims.size() - j);
+            inputDims[idx] = dims[j];
+        }
+        inputsDims.push_back(inputDims);
+        const auto& input = opTensor.getInput(i)->refCastFrom(inputsFallback[i], *opTensor.getOutput(0));
         opInputs.push_back(input.getImpl()->rawPtr());
     }
 
-    // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-               opInputs,
-               getCPUPtr(mOp.getRawOutput(0)));
+    kernelFunc(opInputs,
+               inputsDims,
+               opTensor.getOutput(0)->size(),
+               opTensor.getOutput(0)->dims(),
+               getCPUPtr(opTensor.getRawOutput(0)));
 }
diff --git a/src/operator/ConcatImpl.cpp b/src/operator/ConcatImpl.cpp
index ceefb9031f279be417a8ab0485567a56edea7824..e142b79a8aad5a99a65fdf38de630f3b5668c804 100644
--- a/src/operator/ConcatImpl.cpp
+++ b/src/operator/ConcatImpl.cpp
@@ -87,4 +87,4 @@ void  Aidge::ConcatImpl_cpu::forward() {
                getCPUPtr(mOp.getRawOutput(0)));
 }
 
-void  Aidge::ConcatImpl_cpu::backward() { printf("Not implemented yet.\n"); }
\ No newline at end of file
+void  Aidge::ConcatImpl_cpu::backward() { fmt::print("Not implemented yet.\n"); }
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index b849142dd3abe0131fb0c6c448530a7669ce27dc..34ea7b37ec9929908192bde6f31d84ae581640a2 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -28,17 +28,19 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputI
 }
 
 void Aidge::ConvImpl2D_cpu::forward() {
+    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
+
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getRawInput(0) && "missing input #0");
     assert(mOp.getRawInput(1) && "missing input #1");
     assert(mOp.getRawInput(2) && "missing input #2");
 
     // Find the correct kernel type
-    const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType();
+    const auto outputDataType = opTensor.getOutput(0)->dataType();
     const Registrar<ConvImpl2DForward_cpu>::registrar_key registrarKey = {
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(),
+        opTensor.getInput(0)->dataType(),
+        opTensor.getInput(1)->dataType(),
+        opTensor.getInput(2)->dataType(),
         outputDataType};
 
     Registrar<ConvImpl2DForward_cpu>::registrar_type kernelFunc;
@@ -57,12 +59,12 @@ void Aidge::ConvImpl2D_cpu::forward() {
     // call to forward(). We might put the following shared_ptr as members of
     // this class to avoid that.
     std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
-    const auto& input0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(input0Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
-    const auto& input1 = std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->refCastFrom(input1Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
-    const auto& input2 = std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->refCastFrom(input2Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+    const auto& input0 = opTensor.getInput(0)->refCastFrom(input0Fallback, *opTensor.getOutput(0));
+    const auto& input1 = opTensor.getInput(1)->refCastFrom(input1Fallback, *opTensor.getOutput(0));
+    const auto& input2 = opTensor.getInput(2)->refCastFrom(input2Fallback, *opTensor.getOutput(0));
 
     // Call kernel
-    kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+    kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), opTensor.getInput(0)->template dims<4>(),
         input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(),
         getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index f5cde077bd5a414d8b9add8b8b8715952a27ad01..8e2118e9e78fd364189769ead2eb01f1c55b3c58 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -9,18 +9,15 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include <cstdint>     // std::int32_t
+#include <functional>  // std::multiplies
+#include <memory>
+#include <numeric>     // std::accumulate
 #include <vector>
 
-#include "aidge/operator/Div.hpp"
-#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
 #include "aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
 
 Aidge::NbElts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -28,16 +25,140 @@ Aidge::NbElts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
 }
 
 void Aidge::DivImpl_cpu::forward() {
+    // Find the correct kernel type
+    // auto kernelFunc = Registrar<DivImplForward_cpu>::create({
+    //     std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+    //     std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+    //     std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+    //                                                                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
+    // const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+    //                                                                std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
+
+
+    // auto a = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
+    // auto b = std::static_pointer_cast<Tensor>(mOp.getRawInput(1));
+
+    // // Call kernel
+    // kernelFunc(inputDims0,
+    //     inputDims1,
+    //     std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+    //     getCPUPtr(mOp.getRawInput(0)),
+    //     getCPUPtr(mOp.getRawInput(1)),
+    //     getCPUPtr(mOp.getRawOutput(0)));
+
+/////////////////////////////////////////////////////////////////
+
+    // [5,2,1,7] & [2,6,7]
+    // 1. Same number of dimensions -> [5,2,1,7] & [1,2,6,7]
+    // 2. Find the highest equal dimension -> 3
+    //    Exception: if the first diverging dimension is the last one, then -> 4 (dims.size())
+    // 3. Compute the highest number of contiguous data -> 7
+    // 4. Compute stride and offset step for the broadcast mechanism
+    // 5. Call a simple kernel
+    const auto& opTensor = static_cast<const Div_Op&>(mOp);
+
     // Find the correct kernel type
     auto kernelFunc = Registrar<DivImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
-
-    // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)))->size(),
-        std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)))->size(),
-        getCPUPtr(mOp.getRawInput(0)),
-        getCPUPtr(mOp.getRawInput(1)),
-        getCPUPtr(mOp.getRawOutput(0)));
+        opTensor.getInput(0)->dataType(),
+        opTensor.getInput(1)->dataType(),
+        opTensor.getOutput(0)->dataType()});
+
+    // Compute compatible input dimensions
+    std::vector<std::size_t>        dims0   = opTensor.getInput(0)->dims();
+    std::vector<std::size_t>        dims1   = opTensor.getInput(1)->dims();
+    const std::vector<std::size_t>& outDims = opTensor.getOutput(0)->dims();
+
+    if (dims0.size() > dims1.size()) {
+        dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
+    }
+    else if (dims1.size() > dims0.size()) {
+        dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+    }
+
+    const std::size_t nbDims = dims0.size();
+
+    // Find the highest equal dimension
+    std::size_t contiguousIdx = nbDims - 1;
+    for (; contiguousIdx+1 > 0; --contiguousIdx) {
+        if (dims0[contiguousIdx] != dims1[contiguousIdx]) {
+            if (contiguousIdx == (nbDims - 1)) { // the last dimensions of one of the input Tensors are of size 1
+                const std::vector<std::size_t>& dims = (dims0[contiguousIdx] == 1) ? dims0 : dims1;
+                while ((contiguousIdx+1 > 0) && (dims[contiguousIdx] == 1)) {
+                    --contiguousIdx;
+                }
+            }
+            break;
+        }
+    }
+    ++contiguousIdx;
+
+    // Compute the highest number of contiguous data for each Tensor
+    const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin()+contiguousIdx, dims0.cend(), std::size_t(1), std::multiplies<std::size_t>());
+    const std::size_t input1_contiguous_size = std::accumulate(dims1.cbegin()+contiguousIdx, dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+    const std::size_t output_contiguous_size = std::accumulate(outDims.cbegin()+contiguousIdx, outDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+
+    // Initialize the strides used to iterate through the data, needed because of broadcasting
+    std::int32_t *stride_post0 = nullptr;
+    std::int32_t *stride_post1 = nullptr;
+    std::int32_t *stride_step0 = nullptr;
+    std::int32_t *stride_step1 = nullptr;
+    if (contiguousIdx > 0) {
+        stride_post0 = new std::int32_t[contiguousIdx];
+        stride_post0[contiguousIdx - 1] = 1;
+        stride_post1 = new std::int32_t[contiguousIdx];
+        stride_post1[contiguousIdx - 1] = 1;
+        for (std::size_t i = contiguousIdx - 2; i != static_cast<std::size_t>(-1); --i) {
+            stride_post0[i] = stride_post0[i+1]*static_cast<std::int32_t>(dims0[i+1]);
+            stride_post1[i] = stride_post1[i+1]*static_cast<std::int32_t>(dims1[i+1]);
+        }
+        stride_step0 = new std::int32_t[contiguousIdx];
+        stride_step1 = new std::int32_t[contiguousIdx];
+        for (std::size_t i = 0; i != contiguousIdx; ++i) {
+            stride_step0[i] = (dims0[i] == 1) ? 1 - stride_post0[i] : 1;
+            stride_step1[i] = (dims1[i] == 1) ? 1 - stride_post1[i] : 1;
+        }
+    }
+
+    // variables for arrays offsets
+    std::size_t offsetIn0 = 0;
+    std::size_t offsetIn1 = 0;
+    std::size_t offsetOut = 0;
+
+
+    std::size_t dim = contiguousIdx - 1;
+    const std::size_t nbStacks = std::accumulate(outDims.cbegin(), outDims.cbegin() + contiguousIdx, std::size_t(1), std::multiplies<std::size_t>());
+    for (std::size_t stack = 0; stack < nbStacks;) {
+        kernelFunc(input0_contiguous_size, input1_contiguous_size, output_contiguous_size,
+                    getCPUPtr(mOp.getRawInput(0), offsetIn0*input0_contiguous_size),
+                    getCPUPtr(mOp.getRawInput(1), offsetIn1*input1_contiguous_size),
+                    getCPUPtr(mOp.getRawOutput(0), offsetOut*output_contiguous_size));
+        if (++stack < nbStacks) {
+            std::size_t tmp_stack = stack;
+            while(tmp_stack % outDims[dim] == 0) {
+                tmp_stack /= outDims[dim];
+                dim--;
+            }
+            offsetIn0 += stride_step0[dim];
+            offsetIn1 += stride_step1[dim];
+            ++offsetOut;
+            dim = contiguousIdx - 1;
+        }
+    }
+    if (contiguousIdx > 0) {
+        delete[] stride_post0;
+        delete[] stride_post1;
+        delete[] stride_step0;
+        delete[] stride_step1;
+    }
 }
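
To make the stride bookkeeping above concrete, here is the arithmetic for the shapes used in the introductory comment, worked out by hand (intermediate values traced from the code, not program output):

```cpp
// dims0 = {5,2,1,7}, dims1 = {2,6,7} -> dims1 padded to {1,2,6,7}; output = {5,2,6,7}
// contiguousIdx = 3, so each kernel call handles 7 contiguous values.
//
// Over the leading axes [0, contiguousIdx):
//   stride_post0 = {2, 1, 1};   stride_step0 = {1, 1, 0};    // dims0[2] == 1: stay in place
//   stride_post1 = {12, 6, 1};  stride_step1 = {-11, 1, 1};  // dims1[0] == 1: rewind
//
// nbStacks = 5 * 2 * 6 = 60, i.e. 60 kernel invocations of 7 elements each.
```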
diff --git a/src/operator/ErfImpl.cpp b/src/operator/ErfImpl.cpp
index 06ec65008aee41215192cd05e126ac4f82388c1b..55752e4f5b9f798a6901e108ddcba2f61fdf9774 100644
--- a/src/operator/ErfImpl.cpp
+++ b/src/operator/ErfImpl.cpp
@@ -9,32 +9,34 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include "aidge/backend/cpu/operator/ErfImpl.hpp"
+
+#include <memory>
 #include <vector>
 
+#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Erf.hpp"
 #include "aidge/utils/Types.h"
 
-#include "aidge/backend/cpu/operator/ErfImpl.hpp"
-#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp"
-
 Aidge::NbElts_t Aidge::ErfImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
     return 0;
 }
 
 void Aidge::ErfImpl_cpu::forward() {
+    const Erf_Op& op = static_cast<const Erf_Op&>(mOp);
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ErfImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+                            op.getInput(0)->dataType(),
+                            op.getOutput(0)->dataType()
+                        });
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+    kernelFunc(
+        op.getInput(0)->size(),
+        op.getInput(0)->getImpl()->rawPtr(),
+        op.getOutput(0)->getImpl()->rawPtr()
+    );
 }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index bc4a7a7cab91049c623e9a9e95ee63367da00722..995245907c8c87b0367c7edfa4493bd6b7faf660 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -57,9 +57,10 @@ void Aidge::FCImpl_cpu::forward()
     const auto& input2 = std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->refCastFrom(input2Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
 
     // Call kernel
+    const auto batchSize = (input0.dims().size() > 1) ? input0.dims()[0] : 1;
     kernelFunc(dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
-        input0.dims()[0],
-        input0.size() / input0.dims()[0],
+        batchSize,
+        input0.size() / batchSize,
         input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(),
         getCPUPtr(mOp.getRawOutput(0)));
 }
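
The guard above matters for 1-D inputs, where `input0.dims()[0]` would otherwise be misread as a batch size. A minimal sketch of the flattening rule, assuming plain shape vectors (the helper name is illustrative):

```cpp
#include <cstddef>
#include <functional>
#include <numeric>
#include <utility>
#include <vector>

// An FC input of shape {N, d1, d2, ...} is treated as N rows of d1*d2*... features;
// a 1-D input of shape {d} is one sample with d features, not d samples.
std::pair<std::size_t, std::size_t> fcBatchAndFeatures(const std::vector<std::size_t>& dims) {
    const std::size_t size  = std::accumulate(dims.cbegin(), dims.cend(),
                                              std::size_t(1), std::multiplies<std::size_t>());
    const std::size_t batch = (dims.size() > 1) ? dims[0] : 1;
    return {batch, size / batch};  // e.g. {8,3,4} -> (8, 12); {16} -> (1, 16)
}
```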
diff --git a/src/operator/GatherImpl.cpp b/src/operator/GatherImpl.cpp
index ce98627d95e0d05541db1ccaf4896abe756431b0..d80b53e7e864faf3fca289f94aba4f511bcba161 100644
--- a/src/operator/GatherImpl.cpp
+++ b/src/operator/GatherImpl.cpp
@@ -9,32 +9,34 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include "aidge/backend/cpu/operator/GatherImpl.hpp"
+
+#include <memory>
 #include <vector>
 
+#include "aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Gather.hpp"
 #include "aidge/utils/Types.h"
 
-#include "aidge/backend/cpu/operator/GatherImpl.hpp"
-#include "aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp"
-
 Aidge::NbElts_t Aidge::GatherImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
     return 0;
 }
 
 void Aidge::GatherImpl_cpu::forward() {
+    const Gather_Op& op = static_cast<const Gather_Op&>(mOp);
 
     auto kernelFunc = Registrar<GatherImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+                            op.getInput(0)->dataType(),
+                            op.getOutput(0)->dataType()
+                        });
 
     // Call kernel
     kernelFunc(dynamic_cast<const Gather_Op&>(mOp).getStaticAttributes(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+            op.getInput(0)->dims(),
+            op.getInput(0)->getImpl()->rawPtr(),
+            op.getOutput(0)->getImpl()->rawPtr()
+        );
 }
diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp
index f02effb3172e2c0624c6c7532513a2b794ee3a89..488af17617d556ad7a9d9b73909324d67a672459 100644
--- a/src/operator/MatMulImpl.cpp
+++ b/src/operator/MatMulImpl.cpp
@@ -9,15 +9,14 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int32_t
+#include <numeric>  // std::accumulate
 #include <vector>
 
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp"
@@ -30,27 +29,110 @@ void Aidge::MatMulImpl_cpu::forward()
     // Find the correct kernel type
     auto kernelFunc = Registrar<MatMulImplForward_cpu>::create(
         {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
          std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
-    // Call kernel
-    // if (mOp.getInput(0)->nbDims() == 4) {
-    //     kernelFunc(
-    //         mOp.getStaticAttributes(),
-    //         std::static_pointer_cast<Tensor>(mOp.getInput(0))->template dims<4>(),
-    //         mOp.getInput(0))->getImpl()->rawPtr(),
-    //         mOp.mInputs[1]->getImpl()->rawPtr(),
-    //         mOp.mInputs[2]->getImpl()->rawPtr(),
-    //         getCPUPtr(mOp.getRawOutput(0));
-    // }
-    // else
-    kernelFunc(
-        dynamic_cast<const MatMul_Op&>(mOp).getStaticAttributes(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() / std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
-        getCPUPtr(mOp.getRawInput(0)),
-        getCPUPtr(mOp.getRawInput(1)),
-        getCPUPtr(mOp.getRawOutput(0)));
+    // Compute compatible input dimensions
+    std::vector<std::size_t> dims0 = static_cast<const MatMul_Op&>(mOp).getInput(0)->dims();
+    std::vector<std::size_t> dims1 = static_cast<const MatMul_Op&>(mOp).getInput(1)->dims();
+
+    // keep second-to-last dimension of dims0
+    const std::size_t keepDim0 = (dims0.size() > 1) ? 1 : 0;
+    // keep last dimension of dims1
+    const std::size_t keepDim1 = (dims1.size() > 1) ? 1 : 0;
+
+    if (dims0.size() == 1) {
+        dims0.insert(dims0.cbegin(), 1);
+    }
+    if (dims1.size() == 1) {
+        dims1.push_back(1);
+    }
+
+    if (dims0.size() > dims1.size()) {
+        dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
+    }
+    else if (dims1.size() > dims0.size()) {
+        dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+    }
 
+    // at this point, dims0.size() == dims1.size()
+    const std::size_t nbDims = dims0.size();
 
+    // Initialize the strides used to iterate through the data, needed because of broadcasting
+    std::size_t *stride_post0 = nullptr;
+    std::size_t *stride_post1 = nullptr;
+    std::int32_t *stride_step0 = nullptr;
+    std::int32_t *stride_step1 = nullptr;
+    if (nbDims > 2) {
+        stride_post0 = new std::size_t[nbDims-2];
+        stride_post0[nbDims - 3] = 1;
+        stride_post1 = new std::size_t[nbDims-2];
+        stride_post1[nbDims - 3] = 1;
+        for (std::size_t i = nbDims-4; i != static_cast<std::size_t>(-1); --i) {
+            stride_post0[i] = stride_post0[i+1]*dims0[i+1];
+            stride_post1[i] = stride_post1[i+1]*dims1[i+1];
+        }
+        stride_step0 = new std::int32_t[nbDims-2];
+        stride_step1 = new std::int32_t[nbDims-2];
+        for (std::size_t i = 0; i != nbDims-2; ++i) {
+            stride_step0[i] = (dims0[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post0[i]) : 1;
+            stride_step1[i] = (dims1[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post1[i]) : 1;
+        }
+    }
+
+    const std::vector<std::size_t>& outDims = static_cast<const MatMul_Op&>(mOp).getOutput(0)->dims();
+    const std::size_t nbMatrices = std::accumulate(outDims.cbegin(), outDims.cend() - keepDim0 - keepDim1, std::size_t(1), std::multiplies<std::size_t>());
+    std::size_t dim = outDims.size() - 1 - keepDim0 - keepDim1;
+
+    // variables for arrays offsets
+    std::size_t offsetIn0 = 0;
+    std::size_t offsetIn1 = 0;
+    std::size_t offsetOut = 0;
+    const std::size_t n = dims0[nbDims - 2];
+    const std::size_t k = dims0[nbDims - 1];
+    const std::size_t m = dims1[nbDims - 1];
+    const std::size_t matrix0Size = n*k;
+    const std::size_t matrix1Size = k*m;
+    const std::size_t matrixOutSize = n*m;
+    for (std::size_t stack = 0; stack < nbMatrices;) {
+        kernelFunc(n, k, m,
+                    getCPUPtr(mOp.getRawInput(0), offsetIn0*matrix0Size),
+                    getCPUPtr(mOp.getRawInput(1), offsetIn1*matrix1Size),
+                    getCPUPtr(mOp.getRawOutput(0), offsetOut*matrixOutSize));
+        if (++stack < nbMatrices) {
+            std::size_t tmp_stack = stack;
+            while(tmp_stack % outDims[dim] == 0) {
+                tmp_stack /= outDims[dim];
+                dim--;
+            }
+            offsetIn0 += stride_step0[dim];
+            offsetIn1 += stride_step1[dim];
+            ++offsetOut;
+            dim = outDims.size() - 1 - keepDim0 - keepDim1;
+        }
+    }
+    if (nbDims > 2) {
+        delete[] stride_post0;
+        delete[] stride_post1;
+        delete[] stride_step0;
+        delete[] stride_step1;
+    }
 }
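
A hand-worked shape example for the batched path above (values traced from the code, not measured):

```cpp
// A: {2, 3, 4}, B: {4, 5}
//   keepDim0 = keepDim1 = 1; B is left-padded to {1, 4, 5}; nbDims = 3
//   n = 3, k = 4, m = 5; outDims = {2, 3, 5}; nbMatrices = 2
//   dims1[0] == 1 and stride_post1[0] == 1, so stride_step1[0] = 1 - 1 = 0:
//   the single {4, 5} matrix of B is reused for both stacks of A.
//
// A 1-D left operand {4} is promoted to a {1, 4} row vector and a 1-D right
// operand {4} to a {4, 1} column vector, matching the insertions above.
```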
diff --git a/src/operator/MemorizeImpl.cpp b/src/operator/MemorizeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b2956231ec29784158ea27c68d4ec21a8c4ccc64
--- /dev/null
+++ b/src/operator/MemorizeImpl.cpp
@@ -0,0 +1,81 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <vector>
+
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/MemorizeImpl.hpp"
+
+Aidge::DimSize_t Aidge::MemorizeImpl_cpu::getNbRequiredData(
+    Aidge::IOIndex_t inputIdx) const
+{
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
+
+    if (scheduleStep == 0 && inputIdx == 0) {
+        // No data input is required for the initial step.
+        // Initialization data is required however.
+        return 0;
+    }
+    else if (scheduleStep > 0 && inputIdx == 1) {
+        // No initialization data is required after the initial step.
+        return 0;
+    }
+    else {
+        return OperatorImpl::getNbRequiredData(inputIdx);
+    }
+}
+
+Aidge::NbElts_t Aidge::MemorizeImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
+    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
+
+    if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) {
+        return 0;
+    }
+    else {
+        return std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx))->size();
+    }
+}
+
+void Aidge::MemorizeImpl_cpu::updateConsummerProducer() {
+    OperatorImpl::updateConsummerProducer();
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
+    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
+    AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded");
+}
+
+void Aidge::MemorizeImpl_cpu::forward() {
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>();
+    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
+    AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded");
+
+    if (forwardStep == 0) {
+        op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
+    }
+    else {
+        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+    }
+}
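
Summarizing the schedule that the new implementation encodes (read directly from the code above):

```cpp
// forwardStep == 0 : output(0) <- input(1)   // initialization value; input 0 unused
// forwardStep  > 0 : output(0) <- input(0)   // value fed back from the previous cycle
//
// Consistently, getNbRequiredData() reports 0 for whichever input is unused at the
// current step, and getRequiredMemory() reports 0 for output 1 once scheduleStep
// reaches EndStep (when EndStep > 0), which lets the scheduler end the cycle.
```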
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
index fda49c3f20ed5cbe519d729a0bf759f0964a99fd..87d180b013e44a49cb887ce722533c50206f3889 100644
--- a/src/operator/MulImpl.cpp
+++ b/src/operator/MulImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Mul.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
@@ -34,9 +35,15 @@ void Aidge::MulImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
+    const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
+    const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
+
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+    kernelFunc(inputDims0,
+        inputDims1,
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
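
Mul, and Pow/Sub below, delegate shape alignment to `getBroadcastedDims`. A plausible sketch of that helper, assuming it only left-pads the input shape to the output rank; the real implementation lives in aidge/backend/cpu/data/Broadcasting.hpp and this reimplementation is an assumption:

```cpp
#include <algorithm>  // std::copy
#include <cstddef>
#include <vector>

// Assumed behaviour: align inputDims to outputDims' rank by prepending 1s,
// e.g. getBroadcastedDims({2,6,7}, {6,7}) -> {1,6,7}.
std::vector<std::size_t> getBroadcastedDimsSketch(const std::vector<std::size_t>& outputDims,
                                                  const std::vector<std::size_t>& inputDims) {
    std::vector<std::size_t> dims(outputDims.size(), 1);
    std::copy(inputDims.cbegin(), inputDims.cend(),
              dims.begin() + static_cast<std::ptrdiff_t>(outputDims.size() - inputDims.size()));
    return dims;
}
```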
diff --git a/src/operator/PopImpl.cpp b/src/operator/PopImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..86850610c75f827d9c29e6a0506397c5a844cb00
--- /dev/null
+++ b/src/operator/PopImpl.cpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <vector>
+
+#include "aidge/operator/Pop.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/PopImpl.hpp"
+
+Aidge::NbElts_t Aidge::PopImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+
+    return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size()
+        / std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->dims()[0];
+}
+
+void Aidge::PopImpl_cpu::forward() {
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+    const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>();
+
+    *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))
+        = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->extract({forwardStep});
+}
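
The consumption model of Pop in one concrete shape (derived from the code above):

```cpp
// For an input of shape {T, N, C}, step t writes input.extract({t}), the t-th
// {N, C} slice, to output 0; getNbRequiredData() asks for size() / dims()[0]
// = N * C elements per step, i.e. exactly one slice.
```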
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
index 496646402e33869cfcbe7dae96e1fc81b875d0dd..22b4e27afd4e327c42be066bf7eeb6effdd8b2a9 100644
--- a/src/operator/PowImpl.cpp
+++ b/src/operator/PowImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Pow.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
@@ -34,9 +35,15 @@ void Aidge::PowImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
+    const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
+    const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
+
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+    kernelFunc(inputDims0,
+        inputDims1,
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp
deleted file mode 100644
index d5432c0db9a4da1386e6edd424d170e0c5475740..0000000000000000000000000000000000000000
--- a/src/operator/ProducerImpl.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <cassert>
-#include <memory>
-#include <vector>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/utils/Types.h"
-
-#include "aidge/backend/cpu/operator/ProducerImpl.hpp"
-
-Aidge::DimSize_t Aidge::ProducerImpl_cpu::getNbProducedData(
-    Aidge::IOIndex_t outputIdx) const
-{
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    return std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size();
-}
diff --git a/src/operator/ReduceMeanImpl.cpp b/src/operator/ReduceMeanImpl.cpp
index e31a53d84947e5b2ced14ee9ee6e2badaef07071..82f96f112016d0498d241ee9ed14989066cbc979 100644
--- a/src/operator/ReduceMeanImpl.cpp
+++ b/src/operator/ReduceMeanImpl.cpp
@@ -9,71 +9,87 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
+
+#include <memory>
 #include <vector>
 
 #include "aidge/utils/Types.h"
 #include "aidge/operator/ReduceMean.hpp"
-
-#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
 #include "aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp"
-Aidge::NbElts_t Aidge::ReduceMeanImpl1D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return 0;
-}
-Aidge::NbElts_t Aidge::ReduceMeanImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return 0;
-}
-Aidge::NbElts_t Aidge::ReduceMeanImpl3D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+
+Aidge::NbElts_t Aidge::ReduceMeanImpl_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
     return 0;
 }
 
-void Aidge::ReduceMeanImpl1D_cpu::forward() {
-
+void Aidge::ReduceMeanImpl_cpu::forward() {
+    const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp);
     // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<ReduceMeanImpl1DForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+    auto kernelFunc = Registrar<ReduceMeanImplForward_cpu>::create({
+        op_.getInput(0)->dataType(),
+        op_.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(dynamic_cast<const ReduceMean_Op<1>&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+    kernelFunc(op_.getStaticAttributes(),
+               op_.getInput(0)->dims(),
+               op_.getInput(0)->getImpl()->rawPtr(),
+               op_.getOutput(0)->getImpl()->rawPtr());
 }
 
-void Aidge::ReduceMeanImpl2D_cpu::forward() {
-
-    // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<ReduceMeanImpl2DForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
-
-    // Call kernel
-    kernelFunc(dynamic_cast<const ReduceMean_Op<2>&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
-}
-
-void Aidge::ReduceMeanImpl3D_cpu::forward() {
-
-    // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<ReduceMeanImpl3DForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
-
-    // Call kernel
-    kernelFunc(dynamic_cast<const ReduceMean_Op<3>&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
-}
\ No newline at end of file
diff --git a/src/operator/ReshapeImpl.cpp b/src/operator/ReshapeImpl.cpp
index 02dea1da3d4422abf37b62193bba83e83c87a83f..11df6f663d9a78476103d9671d9d428719c0126d 100644
--- a/src/operator/ReshapeImpl.cpp
+++ b/src/operator/ReshapeImpl.cpp
@@ -9,13 +9,13 @@
  *
  ********************************************************************************/
 
-#include <cassert>
+#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
 
+#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Reshape.hpp"
 #include "aidge/utils/Types.h"
-
-#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
-#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::NbElts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -23,17 +23,17 @@ Aidge::NbElts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIn
 }
 
 void Aidge::ReshapeImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() == 
-           std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size()
-            && "input must have the same overall size as shape");
+    const Reshape_Op& op_ = static_cast<const Reshape_Op&>(mOp);
+    AIDGE_ASSERT(op_.getInput(0)->size() == op_.getOutput(0)->size(),
+                    "input must have the same overall size as shape");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ReshapeImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        op_.getInput(0)->dataType(),
+        op_.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+    kernelFunc(op_.getInput(0)->size(),
+               op_.getInput(0)->getImpl()->rawPtr(),
+               op_.getOutput(0)->getImpl()->rawPtr());
 }
diff --git a/src/operator/SigmoidImpl.cpp b/src/operator/SigmoidImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7322e08ba01bfb931382cf17691e705dfaeeb6c1
--- /dev/null
+++ b/src/operator/SigmoidImpl.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <vector>
+
+#include "aidge/operator/Sigmoid.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
+#include "aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::SigmoidImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::SigmoidImpl_cpu::forward() {
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<SigmoidImplForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
+}
diff --git a/src/operator/SliceImpl.cpp b/src/operator/SliceImpl.cpp
index 32d31f046465425a269d6f8e3fc52eaad31c663a..c1a6480c1e7c0d681abef12f06a57e140d1e9efd 100644
--- a/src/operator/SliceImpl.cpp
+++ b/src/operator/SliceImpl.cpp
@@ -79,4 +79,4 @@ void Aidge::SliceImpl_cpu::forward() {
     mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
-void Aidge::SliceImpl_cpu::backward() { printf("Not implemented yet.\n"); }
+void Aidge::SliceImpl_cpu::backward() { fmt::print("Not implemented yet.\n"); }
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 038a1154182ea8f359cf1b485c3de251ffbbaed5..475f8cb8704739e091f0b8f01ffce680fd851e1f 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Sub.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
@@ -35,9 +36,15 @@ void Aidge::SubImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
+    const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
+    const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
+                                                                   std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
+
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+    kernelFunc(inputDims0,
+        inputDims1,
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
diff --git a/src/operator/TanhImpl.cpp b/src/operator/TanhImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c4658440ab00086be6a469c19d5ea89771857fb1
--- /dev/null
+++ b/src/operator/TanhImpl.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <vector>
+
+#include "aidge/operator/Tanh.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/TanhImpl.hpp"
+#include "aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::TanhImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::TanhImpl_cpu::forward() {
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<TanhImplForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
+}
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
index cfcfb45e3735538c1650cfd990ea85e2333916ad..31fbed4c090f5e4848df12f2bc2ccd36e3aedf9d 100644
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -9,92 +9,184 @@
  *
  ********************************************************************************/
 
-#include <array>
-
 #include <catch2/catch_test_macros.hpp>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
-#include "aidge/utils/TensorUtils.hpp"
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("Tensor creation") {
-  SECTION("from const array") {
-    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-
-    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-
-    Tensor xFloat =
-        Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
-
-    SECTION("Tensor features") {
-      REQUIRE(x.nbDims() == 3);
-      REQUIRE(x.dims()[0] == 2);
-      REQUIRE(x.dims()[1] == 2);
-      REQUIRE(x.dims()[2] == 2);
-      REQUIRE(x.size() == 8);
+#include "aidge/operator/Add.hpp"
+#include "aidge/backend/cpu/operator/AddImpl.hpp"
+
+namespace Aidge {
+
+TEST_CASE("Test addition of Tensors","[TensorImpl][Add]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<int> boolDist(0,1);
+
+    // Create Add Operator
+    std::shared_ptr<Node> myAdd = Add(2);
+    auto op = std::static_pointer_cast<OperatorTensor>(myAdd->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0,T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op->associateInput(1,T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    Tensor Tres{};
+    Tres.setDataType(DataType::Float32);
+    Tres.setBackend("cpu");
+
+
+    for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+        // generate 2 random Tensors
+        // handle dimensions, replace some dimensions with '1' to get broadcasting
+        constexpr std::size_t nbDims = 4;
+        std::vector<std::size_t> dims;
+        for (std::size_t i = 0; i < nbDims; ++i) {
+            dims.push_back(dimSizeDist(gen));
+        }
+        std::vector<std::size_t> dims0 = dims;
+        std::vector<std::size_t> dims1 = dims;
+        std::vector<std::size_t> dimsOut = dims;
+        for (std::size_t i = 0; i < nbDims; ++i) {
+            if (boolDist(gen)) {
+                dims0[i] = 1;
+            }
+            if (boolDist(gen)) {
+                dims1[i] = 1;
+            }
+            dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+        }
+
+        // create arrays and fill them with random values
+        float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+        float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+        float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+        for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+            array0[i] = valueDist(gen);
+        }
+        for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+            array1[i] = valueDist(gen);
+        }
+
+        // compute true result
+        const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+        const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
+        for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+            for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                            + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                            + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                    const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                    for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                        std::size_t idx0 = idx0_0
+                                            + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                            + ((dims0[3] > 1) ? d : 0);
+                        std::size_t idx1 = idx1_0
+                                            + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                            + ((dims1[3] > 1) ? d : 0);
+                        result[idx_out + d] = array0[idx0] + array1[idx1];
+                    }
+                }
+            }
+        }
+
+        // conversion to Aidge::Tensors
+        // input0
+        T0->resize(dims0);
+        T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+        // input1
+        T1->resize(dims1);
+        T1->getImpl()->setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+        // results
+        Tres.resize(dimsOut);
+        Tres.getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+        Tensor T2 = *T0 + *T1;
+        REQUIRE(T2 == Tres);
+
+        // no implementation
+        Tensor T3(T1->dims());
+        REQUIRE_THROWS(*T0 + T3);
+
+        // wrong datatype
+        Tensor T4(T1->dims());
+        T4.setDataType(DataType::Float64);
+        REQUIRE_THROWS(*T0 + T4);
     }
+}
 
-    SECTION("Access to array") {
-      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
-      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
-    }
+TEST_CASE("Test substraction of Tensors","[TensorImpl][Sub]") {
+    Tensor T0 = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 1}, {3, 7}}, {{54, 0}, {7, 12}}}};
+    Tensor T2 = T0 - T1;
+    T2.print();
+    REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{-6,1},{0,-3}},{{-49,6},{0,-4}}}}));
 
-    SECTION("get function") {
-      REQUIRE(x.get<int>({0, 0, 0}) == 1);
-      REQUIRE(x.get<int>({0, 0, 1}) == 2);
-      REQUIRE(x.get<int>({0, 1, 1}) == 4);
-      REQUIRE(x.get<int>({1, 1, 0}) == 7);
-      x.set<int>({1, 1, 1}, 36);
-      REQUIRE(x.get<int>({1, 1, 1}) == 36);
-    }
+    Tensor T3(T1.dims());
+    REQUIRE_THROWS(T0 - T3);
+}
 
-    SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }
+TEST_CASE("Test multiplication of Tensors","[TensorImpl][Mul]") {
+    Tensor T0 = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 2}, {3, 7}}, {{5, 6}, {7, 8}}}};
+    Tensor T2 = T0 * T1;
+    T2.print();
+    REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{7,4},{9,28}},{{25,36},{49,64}}}}));
 
-    SECTION("Tensor (in)equality") {
-      REQUIRE(x == xCopy);
-      REQUIRE_FALSE(x == xFloat);
-    }
-  }
+    Tensor T3(T1.dims());
+    REQUIRE_THROWS(T0 * T3);
 }
 
-TEST_CASE("Tensor methods") {
-  Tensor x = Array3D<int, 2, 2, 2>{{
-    {{1, 2},
-     {3, 4}},
-    {{5, 6},
-     {7, 8}}
-  }};
-
-  Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-
-  Tensor xFloat =
-      Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
-
-  SECTION("Tensor sharing") {
-    Tensor xCopyCtor(x);
-    REQUIRE(xCopyCtor.getImpl() == x.getImpl());
-
-    Tensor xEqOp = x;
-    REQUIRE(xEqOp.getImpl() == x.getImpl());
-
-    Tensor xCloned = x.clone();
-    REQUIRE(xCloned.getImpl() != x.getImpl());
-    REQUIRE(xCloned == x);
-  }
-
-  SECTION("Tensor extract") {
-    Tensor y = x.extract({0, 1});
-    REQUIRE(y.getImpl() == x.getImpl());
-    REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
-    REQUIRE(y.isContiguous());
-    
-    Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
-    REQUIRE(y2.getImpl() == x.getImpl());
-    REQUIRE(!y2.isContiguous());
-    Tensor y3 = y2.clone();
-    REQUIRE(y3.isContiguous());
-    REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
-  }
+TEST_CASE("Test division of Tensors","[TensorImpl][Div]") {
+    Tensor T0 = Array3D<int, 2, 2, 2>{{{{7,4},{9,28}},{{25,36},{49,64}}}};
+    Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 2}, {3, 7}}, {{5, 6}, {7, 8}}}};
+    Tensor T2 = T0 / T1;
+    T2.print();
+    REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}));
+
+    Tensor T3(T1.dims());
+    REQUIRE_THROWS(T0 / T3);
 }
+} // namespace Aidge
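
The hand-rolled reference in the addition test above indexes broadcast operands by zeroing the coordinate on every axis of size 1. The rule in isolation, as a sketch with illustrative names:

```cpp
#include <array>
#include <cstddef>

// Flat index of coordinate {a,b,c,d} in a (possibly broadcast) 4-D tensor:
// an axis of size 1 contributes offset 0 whatever the coordinate is.
std::size_t broadcastIndex(const std::array<std::size_t, 4>& dims,
                           const std::array<std::size_t, 4>& coord) {
    const std::size_t strides[4] = {dims[1]*dims[2]*dims[3], dims[2]*dims[3], dims[3], 1};
    std::size_t idx = 0;
    for (std::size_t i = 0; i < 4; ++i) {
        idx += strides[i] * ((dims[i] > 1) ? coord[i] : 0);
    }
    return idx;
}
```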
diff --git a/unit_tests/operator/Test_AddImpl.cpp b/unit_tests/operator/Test_AddImpl.cpp
index 740b1a5322b55e2347d93ed2e515358080a108a5..e2e7051afda5e7f72c3142987587179bc759f1e8 100644
--- a/unit_tests/operator/Test_AddImpl.cpp
+++ b/unit_tests/operator/Test_AddImpl.cpp
@@ -117,4 +117,63 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
 
         REQUIRE(*op->getOutput(0) == *expectedOutput);
     }
+
+    SECTION("Broadcasting") {
+        std::shared_ptr<Tensor> input_0 = std::make_shared<Tensor>(Array4D<int,3,1,3,2> {
+        {                                       //
+            {                                   //
+                {{0, 1},{2, 3},{4, 5}}          //
+            },                                  //
+            {                                   //
+                {{6, 7},{8, 9},{10, 11}}        //
+            },                                  //
+            {                                   //
+                {{12, 13},{14, 15},{16, 17}}    //
+            }                                   //
+        }                                       //
+        });                                     //
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+        {                                       //
+            {                                   //
+                {{20, 21},{22, 23},{24, 25}},   //
+                {{26, 27},{28, 29},{30, 31}},   //
+                {{32, 33},{34, 35},{36, 37}}    //
+            }                                   //
+        }                                       //
+        });                                     //
+
+        std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<int,2> {{100,200}});  
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
+            {                                               //
+                {                                           //
+                    {{ 120, 222},{ 124, 226},{ 128, 230}},  //
+                    {{ 126, 228},{ 130, 232},{ 134, 236}},  //
+                    {{ 132, 234},{ 136, 238},{ 140, 242}}   //
+                },                                          //
+                {                                           //
+                    {{ 126, 228},{ 130, 232},{ 134, 236}},  //
+                    {{ 132, 234},{ 136, 238},{ 140, 242}},  //
+                    {{ 138, 240},{ 142, 244},{ 146, 248}}   //
+                },                                          //
+                {                                           //
+                    {{ 132, 234},{ 136, 238},{140, 242}},   //
+                    {{ 138, 240},{ 142, 244},{146, 248}},   //
+                    {{ 144, 246},{ 148, 250},{152, 254}}    //
+                }                                           //
+            }                                               //
+        });                                                 //
+
+        std::shared_ptr<Node> myAdd = Add(3);
+        auto op = std::static_pointer_cast<OperatorTensor>(myAdd->getOperator());
+        op->associateInput(0, input_0);
+        op->associateInput(1, input_1);
+        op->associateInput(2, input_2);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
+        myAdd->forward();
+        op->getOutput(0)->print();
+        expectedOutput->print();
+        REQUIRE(*op->getOutput(0) == *expectedOutput);
+    }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp
index 16f69db964a092f6be87e5d983ba00694e8006f8..a0ed261fe9622f36a9bb2e46c4796ae7f6f8f5e6 100644
--- a/unit_tests/operator/Test_DivImpl.cpp
+++ b/unit_tests/operator/Test_DivImpl.cpp
@@ -10,202 +10,307 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Div.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
-#include "aidge/backend/cpu.hpp"
+namespace Aidge {
 
-#include <memory>
+TEST_CASE("[cpu/operator] Div", "[Div][CPU]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+    std::uniform_int_distribution<int> boolDist(0,1);
 
-using namespace Aidge;
+    // Create Div Operator
+    std::shared_ptr<Node> myDiv = Div();
+    auto op = std::static_pointer_cast<OperatorTensor>(myDiv->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0,T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
+    Tres->setDataType(DataType::Float32);
+    Tres->setBackend("cpu");
+
+    // To measure execution time of 'Div_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+
+    SECTION("DivImpl_cpu::forward()") {
+        SECTION("Scalar / Scalar") {
 
-TEST_CASE("[cpu/operator] Div(forward)", "[Div][CPU]") {
-    SECTION("2D Tensor by Singleton") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.07607108, 0.44075000},
-                {0.19494885, 0.20071143}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{0.5}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.15214217, 0.88150001},
-                {0.38989770, 0.40142286}
-            }
-        });
-
-        std::shared_ptr<Node> myDiv = Div();
-        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
-        op -> associateInput(0, input_1);
-        op -> associateInput(1, input_2);
-        op -> setDataType(DataType::Float32);
-        op -> setBackend("cpu");
-        op -> computeOutputDims();
-        myDiv -> forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
+        SECTION("Scalar / +1-D Tensor") {
 
-    }
+        }
+        SECTION("+1-D Tensor / +1-D Tensor - same dimensions") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("2D Tensors") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.79780143, 0.49322051},
-                {0.84239346, 0.83737719}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
-            {
-                {0.59088874, 0.78858775},
-                {0.42879432, 0.17615074}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {1.35017204, 0.62544787},
-                {1.96456301, 4.75375366}
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+
+                // without broadcasting
+                float* array0 = new float[nb_elements];
+                float* array1 = new float[nb_elements];
+                float* result = new float[nb_elements];
+
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                    array1[i] = valueDist(gen);
+                    result[i] = array0[i] / array1[i];
+                }
+
+                // input0
+                T0->resize(dims);
+                T0 -> getImpl() -> setRawPtr(array0, nb_elements);
+
+                // input1
+                T1->resize(dims);
+                T1 -> getImpl() -> setRawPtr(array1, nb_elements);
+
+                // results
+                Tres->resize(dims);
+                Tres -> getImpl() -> setRawPtr(result, nb_elements);
+
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myDiv->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
             }
-        });
-
-        std::shared_ptr<Node> myDiv = Div();
-        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
-        op -> associateInput(0, input_1);
-        op -> associateInput(1, input_2);
-        op -> setDataType(DataType::Float32);
-        op -> setBackend("cpu");
-        op -> computeOutputDims();
-        myDiv->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
 
-    }
+        SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("3D Tensor by 1D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.24180168, 0.44319558, 0.06437260},
-                 {0.21270001, 0.34570599, 0.44151264}},
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions, replace some dimensions with '1' to get broadcasting
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                std::vector<std::size_t> dims0 = dims;
+                std::vector<std::size_t> dims1 = dims;
+                std::vector<std::size_t> dimsOut = dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims0[i] = 1;
+                    }
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                    dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+                }
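+                // e.g. dims = {4,5,6,7} may become dims0 = {4,1,6,7} and dims1 = {1,5,6,7},
+                // giving dimsOut = {4,5,6,7}: every axis of size 1 is stretched to match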
 
-                {{0.62294692, 0.98043168, 0.18628585},
-                 {0.33591706, 0.03432965, 0.32130069}}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
-            {0.63475525, 0.58620811, 0.69340748}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.38093686, 0.75603795, 0.09283517},
-                 {0.33508980, 0.58973253, 0.63672900}},
-
-                {{0.98139703, 1.67249763, 0.26865280},
-                 {0.52920723, 0.05856223, 0.46336490}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+                    array1[i] = valueDist(gen);
+                }
+
+                // compute true result
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
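+                // a broadcast axis (size 1) always contributes index 0, so its single element is reused across that output axis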
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                                    + ((dims1[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] / array1[idx1];
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1 -> getImpl() -> setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myDiv->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myDiv = Div();
-        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
-        op -> associateInput(0, input_1);
-        op -> associateInput(1, input_2);
-        op -> setDataType(DataType::Float32);
-        op -> setBackend("cpu");
-        op -> computeOutputDims();
-        myDiv->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 12; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
+        SECTION("+1-D Tensor / 1-D Tensor") {
+            std::size_t number_of_operation = 0;
+            std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3));
 
-    }
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims0(4);
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims0[i] = dimSizeDist(gen);
+                }
+                std::vector<std::size_t> dimsOut = dims0;
+                std::vector<std::size_t> dims1 = dims0;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                }
+                dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen));
 
-    SECTION("4D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
-            {
-                {
-                    {{0.25675946, 0.36265653, 0.22386390},
-                     {0.30483031, 0.97449398, 0.73871714},
-                     {0.36169255, 0.04510212, 0.27525920}},
-
-                    {{0.73255682, 0.03885978, 0.24181491},
-                    {0.14465559, 0.86070061, 0.88848090},
-                    {0.74408931, 0.87412918, 0.19800508}},
-
-                    {{0.43551809, 0.73437816, 0.37513995},
-                     {0.25414777, 0.06396711, 0.98708153},
-                     {0.02140611, 0.84974837, 0.62108254}}
-                },
-                {
-                    {{0.86227137, 0.69357753, 0.41814715},
-                     {0.76048166, 0.46306920, 0.05907208},
-                     {0.76625377, 0.91793799, 0.92988223}},
-
-                    {{0.34362513, 0.85009813, 0.21107805},
-                     {0.65575773, 0.38140792, 0.48540717},
-                     {0.10045588, 0.85803932, 0.23778951}},
-
-                    {{0.30316389, 0.04176688, 0.17290735},
-                     {0.07942408, 0.48647392, 0.39440966},
-                     {0.26543915, 0.92589515, 0.83948994}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                float* array1 = new float[array1_size];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) {
+                    array0[i] = valueDist(gen);
                 }
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{3.0}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
-            {
-                {
-                    {{0.08558649, 0.12088551, 0.07462130},
-                     {0.10161010, 0.32483134, 0.24623905},
-                     {0.12056419, 0.01503404, 0.09175307}},
-
-                    {{0.24418561, 0.01295326, 0.08060497},
-                     {0.04821853, 0.28690019, 0.29616031},
-                     {0.24802977, 0.29137638, 0.06600169}},
-
-                    {{0.14517270, 0.24479271, 0.12504666},
-                     {0.08471593, 0.02132237, 0.32902718},
-                     {0.00713537, 0.28324947, 0.20702751}}
-                },
-                {
-                    {{0.28742379, 0.23119251, 0.13938238},
-                     {0.25349388, 0.15435641, 0.01969069},
-                     {0.25541791, 0.30597934, 0.30996075}},
-
-                    {{0.11454171, 0.28336605, 0.07035935},
-                     {0.21858591, 0.12713598, 0.16180240},
-                     {0.03348529, 0.28601310, 0.07926317}},
-
-                    {{0.10105463, 0.01392229, 0.05763578},
-                     {0.02647469, 0.16215797, 0.13146989},
-                     {0.08847972, 0.30863172, 0.27982998}}
+                for (std::size_t i = 0; i < array1_size; ++i) {
+                    array1[i] = valueDist(gen);
                 }
+
+                // compute true result
+                auto dims1_tmp = dims1;
+                dims1_tmp.insert(dims1_tmp.cbegin(), 4 - dims1_tmp.size(), std::size_t(1));
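+                // left-padding dims1 with 1s right-aligns the lower-rank tensor (NumPy/ONNX-style broadcasting)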
+
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0)
+                                                    + ((dims1_tmp[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] / array1[idx1];
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0 -> getImpl() -> setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1 -> getImpl() -> setRawPtr(array1, array1_size);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres -> getImpl() -> setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myDiv->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myDiv = Div();
-        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
-        op -> associateInput(0, input_1);
-        op -> associateInput(1, input_2);
-        op -> setDataType(DataType::Float32);
-        op -> setBackend("cpu");
-        op -> computeOutputDims();
-        myDiv->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 54; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
     }
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp
index 1edb915fb78e3e056f455ddecb8e704eee068cd9..5df0528b5d24be04b324cd05d1f964a57c35b3ea 100644
--- a/unit_tests/operator/Test_MatMulImpl.cpp
+++ b/unit_tests/operator/Test_MatMulImpl.cpp
@@ -10,102 +10,281 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::uint16_t
+#include <chrono>
+#include <iostream>
 #include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 
-using namespace Aidge;
+namespace Aidge {
 
 TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
-    // Test MatMul forward with batch size = 2 and feature size = 75
-    std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{
-            {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
-             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
-              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
-              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
-              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15}}});
-    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{
-            {{23600, 23600, 23600, 23600, 23600}, {68600, 68600, 68600, 68600, 68600}}});
-
-    std::shared_ptr<Node> myMatMul = MatMul(75, 5, "mymatmul");
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> dis(0.0, 1.0); // Random float distribution between 0 and 1
+    std::uniform_int_distribution<std::size_t> distDims(10, 100);
+    std::uniform_int_distribution<std::size_t> distNbMatrix(1, 5);
+
+    // Create MatMul Operator
+    std::shared_ptr<Node> myMatMul = MatMul();
     auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator());
-    op->associateInput(1, myWeights);
-
-    SECTION("2D input") {
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{
-                {{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18,
-                  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
-                  38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
-                  57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74},
-                 {75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,  86,  87,  88,  89,
-                  90,  91,  92,  93,  94,  95,  96,  97,  98,  99,  100, 101, 102, 103, 104,
-                  105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
-                  120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
-                  135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
-        op->associateInput(0, myInput);
-        op->setDataType(DataType::Int32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myMatMul->forward();
-        REQUIRE(*(op->getOutput(0)) == *myOutput);
+
+    // To measure execution time of 'MatMul_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{}; // value-initialize: a default-constructed duration is indeterminate
+
+    SECTION("2-D Tensors") {
+        std::size_t totalComputation = 0;
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // generate Tensors dimensions
+            const std::size_t dim0 = distDims(gen);
+            const std::size_t dim1 = distDims(gen);
+            const std::size_t dim2 = distDims(gen);
+            totalComputation += dim0*dim1*dim2;
+
+            // Create and populate the array with random float values
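+            // note: runtime-sized stack arrays (VLAs) are a GCC/Clang extension, not standard C++; large dims may overflow the stack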
+            float bigArray1[dim0][dim1];
+            for (std::size_t i = 0; i < dim0; ++i) {
+                for (std::size_t j = 0; j < dim1; ++j) {
+                    bigArray1[i][j] = dis(gen); // Generate random float value
+                }
+            }
+            float bigArray2[dim1][dim2];
+            for (std::size_t i = 0; i < dim1; ++i) {
+                for (std::size_t j = 0; j < dim2; ++j) {
+                    bigArray2[i][j] = dis(gen); // Generate random float value
+                }
+            }
+            float res[dim0][dim2];
+            for (std::size_t i = 0; i < dim0; ++i) {
+                for (std::size_t j = 0; j < dim2; ++j) {
+                    float sum = 0.0f;
+                    for (std::size_t k = 0; k < dim1; ++k) {
+                        sum += bigArray1[i][k] * bigArray2[k][j];
+                    }
+                    res[i][j] = sum;
+                }
+            }
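+            // this naive O(dim0*dim1*dim2) triple loop provides the reference result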
+
+            // Convert bigArray1 to Tensor
+            std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32);
+            T1 -> resize({dim0,dim1});
+            T1 -> setBackend("cpu");
+            T1 -> getImpl() -> setRawPtr(&bigArray1[0][0], dim0*dim1);
+            // Convert bigArray2 to Tensor
+            std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32);
+            T2 -> resize({dim1,dim2});
+            T2 -> setBackend("cpu");
+            T2 -> getImpl() -> setRawPtr(&bigArray2[0][0], dim1*dim2);
+            // convert res to Tensor
+            std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32);
+            Tres -> resize({dim0,dim2});
+            Tres -> setBackend("cpu");
+            Tres -> getImpl() -> setRawPtr(&res[0][0], dim0*dim2);
+
+            op->associateInput(0, T1);
+            op->associateInput(1, T2);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->computeOutputDims();
+            start = std::chrono::system_clock::now();
+            myMatMul->forward();
+            end = std::chrono::system_clock::now();
+            duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+            REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+        }
+        std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
+        std::cout << "total time: " << duration.count() << std::endl;
     }
-    SECTION("4D input") {
-        std::shared_ptr<Tensor> myInput =
-                std::make_shared<Tensor>(Array4D<int, 2, 3, 5, 5>{{{{{0, 1, 2, 3, 4},
-                                                                     {5, 6, 7, 8, 9},
-                                                                     {10, 11, 12, 13, 14},
-                                                                     {15, 16, 17, 18, 19},
-                                                                     {20, 21, 22, 23, 24}},
-                                                                    {{25, 26, 27, 28, 29},
-                                                                     {30, 31, 32, 33, 34},
-                                                                     {35, 36, 37, 38, 39},
-                                                                     {40, 41, 42, 43, 44},
-                                                                     {45, 46, 47, 48, 49}},
-                                                                    {{50, 51, 52, 53, 54},
-                                                                     {55, 56, 57, 58, 59},
-                                                                     {60, 61, 62, 63, 64},
-                                                                     {65, 66, 67, 68, 69},
-                                                                     {70, 71, 72, 73, 74}}},
-                                                                   {{{75, 76, 77, 78, 79},
-                                                                     {80, 81, 82, 83, 84},
-                                                                     {85, 86, 87, 88, 89},
-                                                                     {90, 91, 92, 93, 94},
-                                                                     {95, 96, 97, 98, 99}},
-                                                                    {{100, 101, 102, 103, 104},
-                                                                     {105, 106, 107, 108, 109},
-                                                                     {110, 111, 112, 113, 114},
-                                                                     {115, 116, 117, 118, 119},
-                                                                     {120, 121, 122, 123, 124}},
-                                                                    {{125, 126, 127, 128, 129},
-                                                                     {130, 131, 132, 133, 134},
-                                                                     {135, 136, 137, 138, 139},
-                                                                     {140, 141, 142, 143, 144},
-                                                                     {145, 146, 147, 148, 149}}}}});
-        op->associateInput(0, myInput);
-        op->setDataType(DataType::Int32);
+
+    SECTION("3-D Tensors") {
+        std::size_t totalComputation = 0;
+        duration = std::chrono::duration<double, std::micro>::zero();
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // generate Tensors dimensions
+            const std::size_t dimNb = distNbMatrix(gen);
+            const std::size_t dim0 = distDims(gen);
+            const std::size_t dim1 = distDims(gen);
+            const std::size_t dim2 = distDims(gen);
+            totalComputation += dim0*dim1*dim2*dimNb;
+
+            // Create and populate the array with random float values
+            float bigArray1[dimNb][dim0][dim1];
+            for (std::size_t n = 0; n < dimNb; ++n) {
+                for (std::size_t i = 0; i < dim0; ++i) {
+                    for (std::size_t j = 0; j < dim1; ++j) {
+                        bigArray1[n][i][j] = dis(gen); // Generate random float value
+                    }
+                }
+            }
+            float bigArray2[dimNb][dim1][dim2];
+            for (std::size_t n = 0; n < dimNb; ++n) {
+                for (std::size_t i = 0; i < dim1; ++i) {
+                    for (std::size_t j = 0; j < dim2; ++j) {
+                        bigArray2[n][i][j] = dis(gen); // Generate random float value
+                    }
+                }
+            }
+            float res[dimNb][dim0][dim2];
+            for (std::size_t n = 0; n < dimNb; ++n) {
+                for (std::size_t i = 0; i < dim0; ++i) {
+                    for (std::size_t j = 0; j < dim2; ++j) {
+                        float sum = 0.0f;
+                        for (std::size_t k = 0; k < dim1; ++k) {
+                            sum += bigArray1[n][i][k] * bigArray2[n][k][j];
+                        }
+                        res[n][i][j] = sum;
+                    }
+                }
+            }
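+            // same reference computation as the 2-D case, repeated over dimNb independent matrix products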
+            // Convert bigArray1 to Tensor
+            std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32);
+            T1 -> resize({dimNb,dim0,dim1});
+            T1 -> setBackend("cpu");
+            T1 -> getImpl() -> setRawPtr(&bigArray1[0][0][0], dimNb*dim0*dim1);
+            // Convert bigArray2 to Tensor
+            std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32);
+            T2 -> resize({dimNb,dim1,dim2});
+            T2 -> setBackend("cpu");
+            T2 -> getImpl() -> setRawPtr(&bigArray2[0][0][0], dimNb*dim1*dim2);
+            // convert res to Tensor
+            std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32);
+            Tres -> resize({dimNb,dim0,dim2});
+            Tres -> setBackend("cpu");
+            Tres -> getImpl() -> setRawPtr(&res[0][0][0], dimNb*dim0*dim2);
+
+            op->associateInput(0, T1);
+            op->associateInput(1, T2);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->computeOutputDims();
+            start = std::chrono::system_clock::now();
+            myMatMul->forward();
+            end = std::chrono::system_clock::now();
+            duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+            REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+        }
+        std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
+        std::cout << "total time: " << duration.count() << std::endl;
+    }
+
+    SECTION("4-D Tensors") {
+        std::size_t totalComputation = 0;
+        duration = std::chrono::duration<double, std::micro>::zero();
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // generate Tensors dimensions
+            const std::size_t dimNb1 = distNbMatrix(gen);
+            const std::size_t dimNb2 = distNbMatrix(gen);
+            const std::size_t dim0 = distDims(gen);
+            const std::size_t dim1 = distDims(gen);
+            const std::size_t dim2 = distDims(gen);
+            totalComputation += dim0*dim1*dim2*dimNb1*dimNb2;
+
+            // Create and populate the array with random float values
+            float bigArray1[dimNb1][dimNb2][dim0][dim1];
+            for (std::size_t n1 = 0; n1 < dimNb1; ++n1) {
+                for (std::size_t n2 = 0; n2 < dimNb2; ++n2) {
+                    for (std::size_t i = 0; i < dim0; ++i) {
+                        for (std::size_t j = 0; j < dim1; ++j) {
+                            bigArray1[n1][n2][i][j] = dis(gen); // Generate random float value
+                        }
+                    }
+                }
+            }
+            float bigArray2[dimNb1][dimNb2][dim1][dim2];
+            for (std::size_t n1 = 0; n1 < dimNb1; ++n1) {
+                for (std::size_t n2 = 0; n2 < dimNb2; ++n2) {
+                    for (std::size_t i = 0; i < dim1; ++i) {
+                        for (std::size_t j = 0; j < dim2; ++j) {
+                            bigArray2[n1][n2][i][j] = dis(gen); // Generate random float value
+                        }
+                    }
+                }
+            }
+            float res[dimNb1][dimNb2][dim0][dim2];
+            for (std::size_t n1 = 0; n1 < dimNb1; ++n1) {
+                for (std::size_t n2 = 0; n2 < dimNb2; ++n2) {
+                    for (std::size_t i = 0; i < dim0; ++i) {
+                        for (std::size_t j = 0; j < dim2; ++j) {
+                            float sum = 0.0f;
+                            for (std::size_t k = 0; k < dim1; ++k) {
+                                sum += bigArray1[n1][n2][i][k] * bigArray2[n1][n2][k][j];
+                            }
+                            res[n1][n2][i][j] = sum;
+                        }
+                    }
+                }
+            }
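+            // each (n1, n2) pair is an independent matrix product: MatMul batches over the two leading axes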
+            // Convert bigArray1 to Tensor
+            std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>(DataType::Float32);
+            T1 -> resize({dimNb1,dimNb2,dim0,dim1});
+            T1 -> setBackend("cpu");
+            T1 -> getImpl() -> setRawPtr(&bigArray1[0][0][0][0], dimNb1*dimNb2*dim0*dim1);
+            // Convert bigArray2 to Tensor
+            std::shared_ptr<Tensor> T2 = std::make_shared<Tensor>(DataType::Float32);
+            T2 -> resize({dimNb1,dimNb2,dim1,dim2});
+            T2 -> setBackend("cpu");
+            T2 -> getImpl() -> setRawPtr(&bigArray2[0][0][0][0], dimNb1*dimNb2*dim1*dim2);
+            // convert res to Tensor
+            std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>(DataType::Float32);
+            Tres -> resize({dimNb1,dimNb2,dim0,dim2});
+            Tres -> setBackend("cpu");
+            Tres -> getImpl() -> setRawPtr(&res[0][0][0][0], dimNb1*dimNb2*dim0*dim2);
+
+            op->associateInput(0, T1);
+            op->associateInput(1, T2);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->computeOutputDims();
+            start = std::chrono::system_clock::now();
+            myMatMul->forward();
+            end = std::chrono::system_clock::now();
+            duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+            REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+        }
+        std::cout << "multiplications over time spent: " << totalComputation/duration.count() << std::endl;
+        std::cout << "total time: " << duration.count() << std::endl;
+    }
+
+    SECTION("+2-D / 1-D") {
+        // exercises both computation with a 1-D Tensor and broadcasting
+        // input_0
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        op->associateInput(0,T0);
+        const std::size_t dim0 = distNbMatrix(gen);
+        const std::size_t dim1 = distNbMatrix(gen) + 1;
+        const std::size_t dim2 = distNbMatrix(gen);
+        const std::size_t dim3 = distNbMatrix(gen);
+        T0->resize({dim0,dim1,dim2,dim3});
+        T0->setDataType(DataType::Float32);
+        T0->setBackend("cpu");
+
+        // input_1
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        op -> associateInput(1,T1);
+        T1->resize({dim3});
+        T1->setDataType(DataType::Float32);
+        T1->setBackend("cpu");
+
+        op->setDataType(DataType::Float32);
         op->setBackend("cpu");
         op->computeOutputDims();
         myMatMul->forward();
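+        // no numerical check here: this section only verifies that output dims are computed and forward() completes with a 1-D second input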
-        REQUIRE(*(op->getOutput(0)) == *myOutput);
-    }
 
-    // std::cout << static_cast<Tensor>((*myMatMul->getOperator())["weight"])[0][0][0][0] << std::endl;
-}
\ No newline at end of file
+    }
+}
+} // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 71646c92fa7f041d695a89858cf21ab0d0336f2c..c0e9be1c6062eaf311d5eaf2515df2b4fd2b8a9e 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -14,6 +14,7 @@
 #include <cstdlib>
 #include <memory>
 
+#include "aidge/utils/TensorUtils.hpp"
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/data/Tensor.hpp"
@@ -21,10 +22,12 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
 #include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Pop.hpp"
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] MetaOperator/PaddedConv(forward)", "[MetaOperator][PaddedConv][CPU]") {
+TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
+  SECTION("PaddedConv(forward)") {
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(
             Array4D<double, 4, 3, 3, 3>{{{{{6.20986394e-01, 1.19775136e-03, 7.22876095e-02},
                                           {1.16492919e-01, 8.21634093e-02, 1.17413265e-01},
@@ -187,4 +190,240 @@ TEST_CASE("[cpu/operator] MetaOperator/PaddedConv(forward)", "[MetaOperator][Pad
 
     std::shared_ptr<Node> myPaddedConv =
             PaddedConv(3, 4, {3, 3}, "myPaddedConv", {1, 1}, {1, 1, 1, 1});
+    }
+    SECTION("LSTM(forward)") {
+        auto pop = Pop();
+        auto myLSTM = LSTM(32, 64, 0, true, "ltsm");
+        auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
+
+        auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraph();
+        microGraph->save("lstm", false, false);
+
+        REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
+        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->nbOutputs() == 2);
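+        // 3 + 8 + 8 inputs: the sequence plus the two initial states, 8 gate weight tensors (4 W + 4 R),
+        // and, presumably, the 8 gate bias tensors (inputs 9-16, left untouched below)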
+
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array2D<float, 16, 32>{});
+        std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>(
+            Array2D<float, 1, 64>{});
+        std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>(
+            Array2D<float, 64, 32>{});
+        std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>(
+            Array2D<float, 64, 64>{});
+
+        pop->addChild(myLSTM, 0, 0);
+        pop->getOperator()->associateInput(0, myInput);
+        op->associateInput(17, myInit);
+        op->associateInput(18, myInit);
+
+        // Weights X
+        myLSTM->input(1).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(2).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(3).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(4).first->getOperator()->setOutput(0, myInitW);
+        // Weights H
+        myLSTM->input(5).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(6).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(7).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(8).first->getOperator()->setOutput(0, myInitR);
+
+        auto g = getConnectedGraphView(myLSTM);
+        g->setDataType(DataType::Float32);
+        g->setBackend("cpu");
+
+        auto scheduler = SequentialScheduler(g);
+        scheduler.forward(true, true);
+
+        g->save("lstm_outside_dims", true, true);
+
+        microGraph->save("lstm_dims", true, true);
+        REQUIRE(op->outputDimsForwarded());
+
+        auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraphScheduler();
+        microGraphScheduler->saveSchedulingDiagram("lstm_scheduling");
+
+        REQUIRE(op->getNbConsumedData(0) == 512);
+        REQUIRE(op->getNbConsumedData(1) == 32768);
+        REQUIRE(op->getNbProducedData(0) == 1088);
+        REQUIRE(op->getNbProducedData(1) == 1088);
+        REQUIRE(microGraphScheduler->getStaticScheduling(0).size() == 26);
+        REQUIRE(microGraphScheduler->getStaticScheduling(1).size() == 24);
+        REQUIRE(microGraphScheduler->getStaticScheduling(15).size() == 24);
+    }
+    SECTION("LSTM(forward_values)") {
+        auto myLSTM = LSTM(2, 3, 0, true, "ltsm");
+        auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
+
+        auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraph();
+        microGraph->save("lstm", false, false);
+
+        REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
+        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->nbOutputs() == 2);
+
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array2D<float, 3, 2>{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}});
+        std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>(
+            Array2D<float, 3, 3>{{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}});
+        std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>(
+            Array2D<float, 3, 2>{{{0.1, 0.1}, {0.1, 0.1}, {0.1, 0.1}}});
+        std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>(
+            Array2D<float, 3, 3>{{{0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}}});
+
+        op->associateInput(0, myInput);
+        op->associateInput(17, myInit);
+        op->associateInput(18, myInit);
+
+        // Weights X
+        myLSTM->input(1).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(2).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(3).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(4).first->getOperator()->setOutput(0, myInitW);
+        // Weights H
+        myLSTM->input(5).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(6).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(7).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(8).first->getOperator()->setOutput(0, myInitR);
+
+        auto g = getConnectedGraphView(myLSTM);
+        g->setDataType(DataType::Float32);
+        g->setBackend("cpu");
+
+        auto scheduler = SequentialScheduler(g);
+        scheduler.forward();
+
+        microGraph->save("lstm_values_dims", false, true);
+
+        std::shared_ptr<Tensor> myHiddenState = std::make_shared<Tensor>(
+                Array2D<float, 3, 3>{{{0.0952412, 0.0952412, 0.0952412},
+                                     {0.25606447, 0.25606447, 0.25606447},
+                                     {0.40323776, 0.40323776, 0.40323776}}});
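+        // all weights are 0.1 and the initial state is zero, so the three hidden units of each row are identical by symmetry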
+
+
+        auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraphScheduler();
+        microGraphScheduler->saveSchedulingDiagram("lstm_values_scheduling");
+
+        op->getOutput(0)->print();
+        myHiddenState->print();
+
+        REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
+    }
+    SECTION("LSTM(forward_values_seq)") {
+        auto pop = Pop();
+        auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
+        auto myGraph = Sequential({pop, myLSTM});
+        auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
+
+        REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
+        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->nbOutputs() == 2);
+
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}});
+        std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>(
+            Array2D<float, 3, 3>{{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}});
+        std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>(
+            Array2D<float, 3, 2>{{{0.1, 0.1}, {0.1, 0.1}, {0.1, 0.1}}});
+        std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>(
+            Array2D<float, 3, 3>{{{0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}}});
+
+        pop->getOperator()->associateInput(0, myInput);
+        op->associateInput(17, myInit);
+        op->associateInput(18, myInit);
+
+        // Weights X
+        myLSTM->input(1).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(2).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(3).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(4).first->getOperator()->setOutput(0, myInitW);
+        // Weights H
+        myLSTM->input(5).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(6).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(7).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(8).first->getOperator()->setOutput(0, myInitR);
+
+        auto g = getConnectedGraphView(myLSTM);
+        g->setDataType(DataType::Float32);
+        g->setBackend("cpu");
+
+        g->save("lstm_seq", true, true);
+
+        auto scheduler = SequentialScheduler(g);
+        scheduler.forward(true, true);
+        scheduler.saveSchedulingDiagram("lstm_seq_schedule");
+
+        std::shared_ptr<Tensor> myHiddenState = std::make_shared<Tensor>(
+                Array2D<float, 3, 3>{{{0.24439372, 0.24439372, 0.24439372},
+                                     {0.49801484, 0.49801484, 0.49801484},
+                                     {0.67162132, 0.67162132, 0.67162132}}});
+
+        myGraph->save("lstm_seq_mygraph", true, true);
+
+        op->getOutput(0)->print();
+        myHiddenState->print();
+
+        REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
+    }
+    SECTION("LSTM(forward_values_seq_flatten)") {
+        auto pop = Pop();
+        auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
+        auto op = std::static_pointer_cast<MetaOperator_Op>(myLSTM->getOperator());
+
+        // Here we test the LSTM as if it had been flattened into the graph:
+        // we simply plug its micro-graph into our larger myGraph.
+        auto myGraph = std::make_shared<GraphView>();
+        pop->addChild(op->getMicroGraph()->getOrderedInputs()[0].first, 0, 0);
+        myGraph->add(op->getMicroGraph());
+        myGraph->add(pop);
+
+        REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
+        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->nbOutputs() == 2);
+
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}});
+        std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>(
+            Array2D<float, 3, 3>{{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}});
+        std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>(
+            Array2D<float, 3, 2>{{{0.1, 0.1}, {0.1, 0.1}, {0.1, 0.1}}});
+        std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>(
+            Array2D<float, 3, 3>{{{0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}}});
+
+        pop->getOperator()->associateInput(0, myInput);
+        op->associateInput(17, myInit);
+        op->associateInput(18, myInit);
+
+        // Weights X
+        auto prodX = Producer(myInitW);
+        prodX->addChild(op->getMicroGraph()->getOrderedInputs()[1].first, 0, 1);
+        prodX->addChild(op->getMicroGraph()->getOrderedInputs()[2].first, 0, 1);
+        prodX->addChild(op->getMicroGraph()->getOrderedInputs()[3].first, 0, 1);
+        prodX->addChild(op->getMicroGraph()->getOrderedInputs()[4].first, 0, 1);
+        // Weights H
+        auto prodH = Producer(myInitR);
+        prodH->addChild(op->getMicroGraph()->getOrderedInputs()[5].first, 0, 1);
+        prodH->addChild(op->getMicroGraph()->getOrderedInputs()[6].first, 0, 1);
+        prodH->addChild(op->getMicroGraph()->getOrderedInputs()[7].first, 0, 1);
+        prodH->addChild(op->getMicroGraph()->getOrderedInputs()[8].first, 0, 1);
+        myGraph->add({prodX, prodH});
+
+        myGraph->setDataType(DataType::Float32);
+        myGraph->setBackend("cpu");
+        myGraph->save("lstm_seq_flatten", true, true);
+
+        std::shared_ptr<Tensor> myHiddenState = std::make_shared<Tensor>(
+                Array2D<float, 3, 3>{{{0.24439372, 0.24439372, 0.24439372},
+                                     {0.49801484, 0.49801484, 0.49801484},
+                                     {0.67162132, 0.67162132, 0.67162132}}});
+
+        auto scheduler = SequentialScheduler(myGraph);
+        scheduler.forward(true, true);
+        scheduler.saveSchedulingDiagram("lstm_seq_flatten_schedule");
+
+        op->getOutput(0)->print();
+        myHiddenState->print();
+
+        REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
+    }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp
index 1707bc81e0bb549bfe90078242f8a4eae77db3c3..5b5a05764ecb0298a08c3e9ceece448d46e63044 100644
--- a/unit_tests/operator/Test_MulImpl.cpp
+++ b/unit_tests/operator/Test_MulImpl.cpp
@@ -10,123 +10,307 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Mul.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
-#include "aidge/backend/cpu.hpp"
+namespace Aidge {
 
-#include <memory>
+TEST_CASE("[cpu/operator] Mul", "[Mul][CPU]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // random floats in [0.1, 1.1)
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+    std::uniform_int_distribution<int> boolDist(0,1);
 
-using namespace Aidge;
+    // Create Mul Operator
+    std::shared_ptr<Node> myMul = Mul();
+    auto op = std::static_pointer_cast<OperatorTensor>(myMul->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0,T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
+    Tres->setDataType(DataType::Float32);
+    Tres->setBackend("cpu");
+
+    // To measure execution time of 'Mul_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+
+    SECTION("MulImpl_cpu::forward()") {
+        SECTION("Scalar / Scalar") {
 
-TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
-    SECTION("2D Tensor by Singleton") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.38977361, 0.34064174},
-                {0.00427264, 0.90872520}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{3.0}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {1.16932082, 1.02192521},
-                {0.01281792, 2.72617555}
-            }
-        });
-
-        std::shared_ptr<Node> myMul = Mul();
-        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
-        myMul->getOperator()->associateInput(0, input_1);
-        myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->setDataType(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        myMul->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
+        SECTION("Scalar / +1-D Tensor") {
 
-    }
+        }
+        SECTION("+1-D Tensor / +1-D Tensor - same dimensions") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("2D Tensors") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.38977361, 0.34064174},
-                {0.00427264, 0.90872520}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
-            {
-                {0.02362096, 0.24084556},
-                {0.94690859, 0.13512510}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.00920683, 0.08204205},
-                {0.00404580, 0.12279158}
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+
+                // without broadcasting
+                float* array0 = new float[nb_elements];
+                float* array1 = new float[nb_elements];
+                float* result = new float[nb_elements];
+
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                    array1[i] = valueDist(gen);
+                    result[i] = array0[i] * array1[i];
+                }
+
+                // input0
+                T0->resize(dims);
+                T0->getImpl()->setRawPtr(array0, nb_elements);
+
+                // input1
+                T1->resize(dims);
+                T1->getImpl()->setRawPtr(array1, nb_elements);
+
+                // results
+                Tres->resize(dims);
+                Tres->getImpl()->setRawPtr(result, nb_elements);
+
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myMul->forward();
+                end = std::chrono::system_clock::now();
+                duration += end - start; // keep sub-microsecond precision (duration has a double rep)
+
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
             }
-        });
-
-        std::shared_ptr<Node> myMul = Mul();
-        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
-        myMul->getOperator()->associateInput(0, input_1);
-        myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->setDataType(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        myMul->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
 
-    }
+        SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("3D Tensor by 1D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.33647752, 0.89360154, 0.46586215},
-                 {0.71518236, 0.71481097, 0.97991812}},
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions, replace some dimensions with '1' to get broadcasting
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                std::vector<std::size_t> dims0 = dims;
+                std::vector<std::size_t> dims1 = dims;
+                std::vector<std::size_t> dimsOut = dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims0[i] = 1;
+                    }
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                    dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+                }
 
-                {{0.17393428, 0.56849813, 0.18489265},
-                 {0.78397650, 0.00348300, 0.65758008}}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
-            {0.15380561, 0.51063120, 0.93031412}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.05175213, 0.45630082, 0.43339813},
-                 {0.10999906, 0.36500478, 0.91163164}},
-
-                {{0.02675207, 0.29029289, 0.17200825},
-                 {0.12057999, 0.00177853, 0.61175603}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+                    array1[i] = valueDist(gen);
+                }
+
+                // compute true result
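+                // Row-major strides for each input; below, any dimension of extent 1
+                // is pinned to index 0, which implements the broadcasting rule.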
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                                    + ((dims1[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] * array1[idx1];
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1->getImpl()->setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres->getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myMul->forward();
+                end = std::chrono::system_clock::now();
+                duration += end - start; // keep sub-microsecond precision (duration has a double rep)
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myMul = Mul();
-        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
-        myMul->getOperator()->associateInput(0, input_1);
-        myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->setDataType(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        myMul->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 12; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
+        SECTION("+1-D Tensor / 1-D Tensor") {
+            std::size_t number_of_operation = 0;
+            std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3));
+
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims0(nbDims);
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims0[i] = dimSizeDist(gen);
+                }
+                std::vector<std::size_t> dimsOut = dims0;
+                std::vector<std::size_t> dims1 = dims0;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                }
+                dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen));
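+                // dropping 1 to 3 leading dimensions yields a lower-rank second operand
+                // that aligns with the trailing dimensions of dims0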
+
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                float* array1 = new float[array1_size];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < array1_size; ++i) {
+                    array1[i] = valueDist(gen);
+                }
 
+                // compute true result
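+                // left-pad dims1 with 1s so both operands have rank nbDims
+                // (trailing-dimension alignment, NumPy-style broadcasting)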
+                auto dims1_tmp = dims1;
+                dims1_tmp.insert(dims1_tmp.cbegin(), nbDims - dims1_tmp.size(), std::size_t(1));
+
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0)
+                                                    + ((dims1_tmp[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] * array1[idx1];
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1->getImpl()->setRawPtr(array1, array1_size);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres->getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myMul->forward();
+                end = std::chrono::system_clock::now();
+                duration += end - start; // keep sub-microsecond precision (duration has a double rep)
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+            }
+
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
+        }
     }
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_PaddedConv.cpp b/unit_tests/operator/Test_PaddedConv.cpp
index 3baf0a7aa0f366a8f0dd4e3e9df6700a5cdb0cea..03a592e52b7d057065353a7d99c088d9831c67c7 100644
--- a/unit_tests/operator/Test_PaddedConv.cpp
+++ b/unit_tests/operator/Test_PaddedConv.cpp
@@ -150,12 +150,15 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)", "[PaddedConv][CPU]") {
         });
 
         myConv->getOperator()->associateInput(0,myInput);
-        myConv->getOperator()->associateInput(1,myWeights);
-        myConv->getOperator()->associateInput(2,myBias);
-        myConv->getOperator()->setDataType(DataType::Int32);
-        myConv->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        myConv->forward();
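+        // Weights and bias are now held by Producer nodes feeding the Conv:
+        // set them via setOutput(), then run the whole connected graph with the scheduler.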
+        myConv->input(1).first->getOperator()->setOutput(0, myWeights);
+        myConv->input(2).first->getOperator()->setOutput(0, myBias);
+
+        auto g = getConnectedGraphView(myConv);
+        g->setDataType(DataType::Int32);
+        g->setBackend("cpu");
+
+        auto scheduler = SequentialScheduler(g);
+        scheduler.forward();
 
         REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
@@ -309,12 +312,15 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)", "[PaddedConv][CPU]") {
         });
 
         myConv->getOperator()->associateInput(0,myInput);
-        myConv->getOperator()->associateInput(1,myWeights);
-        myConv->getOperator()->associateInput(2,myBias);
-        myConv->getOperator()->setDataType(DataType::Int32);
-        myConv->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        myConv->forward();
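+        // Weights and bias are now held by Producer nodes feeding the Conv:
+        // set them via setOutput(), then run the whole connected graph with the scheduler.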
+        myConv->input(1).first->getOperator()->setOutput(0, myWeights);
+        myConv->input(2).first->getOperator()->setOutput(0, myBias);
+
+        auto g = getConnectedGraphView(myConv);
+        g->setDataType(DataType::Int32);
+        g->setBackend("cpu");
+
+        auto scheduler = SequentialScheduler(g);
+        scheduler.forward();
 
         REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp
index 0c95e785958aca72b5ae1f5727134552310e5bef..01f9760275923b2249e5b6098b83b4ae27d5fb30 100644
--- a/unit_tests/operator/Test_PowImpl.cpp
+++ b/unit_tests/operator/Test_PowImpl.cpp
@@ -10,198 +10,308 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <cmath>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Pow.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
-#include "aidge/backend/cpu.hpp"
+namespace Aidge {
 
-#include <memory>
+TEST_CASE("[cpu/operator] Pow", "[Pow][CPU]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // random floats between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+    std::uniform_int_distribution<int> boolDist(0,1);
 
-using namespace Aidge;
+    // Create Pow Operator
+    std::shared_ptr<Node> myPow = Pow();
+    auto op = std::static_pointer_cast<OperatorTensor>(myPow->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0, T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op->associateInput(1, T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
+    Tres->setDataType(DataType::Float32);
+    Tres->setBackend("cpu");
+
+    // To measure execution time of 'Pow_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+
+    SECTION("PowImpl_cpu::forward()") {
+        SECTION("Scalar / Scalar") {
 
-TEST_CASE("[cpu/operator] Pow(forward)", "[Pow][CPU]") {
-    SECTION("2D Tensor by Singleton") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.42139274, 0.51524192},
-                {0.85247433, 0.13432795}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.17757183, 0.26547423},
-                {0.72671247, 0.01804400}
-            }
-        });
-
-        std::shared_ptr<Node> myPow = Pow();
-        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
-        op->associateInput(0, input_1);
-        op->associateInput(1, input_2);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myPow->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
+        SECTION("Scalar / +1-D Tensor") {
 
-    }
+        }
+        SECTION("+1-D Tensor / +1-D Tensor - same dimensions") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("3D Tensor by 1D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.87519985, 0.10536593, 0.20268351},
-                 {0.75532353, 0.95977652, 0.03897029}},
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
 
-                {{0.67554104, 0.35499334, 0.27741563},
-                 {0.94270861, 0.48397779, 0.35532343}}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
-            {0.39333701, 0.08719915, 0.16713941}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.94891787, 0.82182676, 0.76584703},
-                 {0.89549923, 0.99642646, 0.58137459}},
-
-                {{0.85702944, 0.91364944, 0.80709606},
-                 {0.97706109, 0.93867886, 0.84118503}}
+                // without broadcasting
+                float* array0 = new float[nb_elements];
+                float* array1 = new float[nb_elements];
+                float* result = new float[nb_elements];
+
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                    array1[i] = valueDist(gen);
+                    result[i] = std::pow(array0[i], array1[i]);
+                }
+
+                // input0
+                T0->resize(dims);
+                T0->getImpl()->setRawPtr(array0, nb_elements);
+
+                // input1
+                T1->resize(dims);
+                T1->getImpl()->setRawPtr(array1, nb_elements);
+
+                // results
+                Tres->resize(dims);
+                Tres->getImpl()->setRawPtr(result, nb_elements);
+
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myPow->forward();
+                end = std::chrono::system_clock::now();
+                duration += end - start; // keep sub-microsecond precision (duration has a double rep)
+
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
             }
-        });
-
-        std::shared_ptr<Node> myPow = Pow();
-        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
-        op->associateInput(0, input_1);
-        op->associateInput(1, input_2);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myPow->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 12; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
 
-    }
+        SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("2D Tensors") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.79780143, 0.49322051},
-                {0.84239346, 0.83737719}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
-            {
-                {0.59088874, 0.78858775},
-                {0.42879432, 0.17615074}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.87504572, 0.57271165},
-                {0.92909741, 0.96922028}
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions, replace some dimensions with '1' to get broadcasting
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                std::vector<std::size_t> dims0 = dims;
+                std::vector<std::size_t> dims1 = dims;
+                std::vector<std::size_t> dimsOut = dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims0[i] = 1;
+                    }
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                    dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+                }
+
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+                    array1[i] = valueDist(gen);
+                }
+
+                // compute true result
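+                // Row-major strides for each input; below, any dimension of extent 1
+                // is pinned to index 0, which implements the broadcasting rule.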
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                                    + ((dims1[3] > 1) ? d : 0);
+                                result[idx_out + d] = std::pow(array0[idx0], array1[idx1]);
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1->getImpl()->setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres->getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myPow->forward();
+                end = std::chrono::system_clock::now();
+                duration += end - start; // keep sub-microsecond precision (duration has a double rep)
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myPow = Pow();
-        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
-        op->associateInput(0, input_1);
-        op->associateInput(1, input_2);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myPow->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
+        SECTION("+1-D Tensor / 1-D Tensor") {
+            std::size_t number_of_operation = 0;
+            std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3));
 
-    }
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims0(nbDims);
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims0[i] = dimSizeDist(gen);
+                }
+                std::vector<std::size_t> dimsOut = dims0;
+                std::vector<std::size_t> dims1 = dims0;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                }
+                dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen));
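+                // dropping 1 to 3 leading dimensions yields a lower-rank second operand
+                // that aligns with the trailing dimensions of dims0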
 
-    SECTION("4D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
-            {
-                {
-                    {{0.80191749, 0.45388508, 0.86550850},
-                     {0.47226250, 0.55809456, 0.59451854},
-                     {0.45497441, 0.02653158, 0.44041735}},
-                    {{0.30726379, 0.73146582, 0.46462774},
-                     {0.30268502, 0.78075552, 0.65154958},
-                     {0.91332406, 0.62448132, 0.53238851}},
-                    {{0.13917381, 0.43061519, 0.30198061},
-                     {0.12880909, 0.08995515, 0.29609048},
-                     {0.46449280, 0.47559714, 0.24193990}}
-                },
-                {
-                    {{0.87349969, 0.51625526, 0.16921073},
-                     {0.95035923, 0.10066575, 0.56729180},
-                     {0.84686232, 0.05965143, 0.03635806}},
-                    {{0.61107808, 0.59954077, 0.45627308},
-                     {0.84114522, 0.77186388, 0.37427086},
-                     {0.13415480, 0.00617349, 0.84260136}},
-                    {{0.55090177, 0.57292056, 0.29158932},
-                     {0.67131883, 0.96988875, 0.69545972},
-                     {0.80979776, 0.18238151, 0.19527155}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                float* array1 = new float[array1_size];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) {
+                    array0[i] = valueDist(gen);
                 }
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
-            {
-                {
-                    {{6.43071651e-01, 2.06011668e-01, 7.49104977e-01},
-                     {2.23031864e-01, 3.11469525e-01, 3.53452295e-01},
-                     {2.07001716e-01, 7.03924568e-04, 1.93967447e-01}},
-
-                    {{9.44110379e-02, 5.35042226e-01, 2.15878934e-01},
-                     {9.16182250e-02, 6.09579206e-01, 4.24516857e-01},
-                     {8.34160864e-01, 3.89976919e-01, 2.83437520e-01}},
-
-                    {{1.93693489e-02, 1.85429439e-01, 9.11922902e-02},
-                     {1.65917836e-02, 8.09192937e-03, 8.76695737e-02},
-                     {2.15753555e-01, 2.26192638e-01, 5.85349165e-02}}
-                },
-                {
-                    {{7.63001740e-01, 2.66519487e-01, 2.86322720e-02},
-                     {9.03182685e-01, 1.01335924e-02, 3.21819991e-01},
-                     {7.17175782e-01, 3.55829368e-03, 1.32190844e-03}},
-
-                    {{3.73416424e-01, 3.59449148e-01, 2.08185121e-01},
-                     {7.07525253e-01, 5.95773816e-01, 1.40078679e-01},
-                     {1.79975089e-02, 3.81119971e-05, 7.09977031e-01}},
-
-                    {{3.03492755e-01, 3.28237981e-01, 8.50243345e-02},
-                     {4.50668961e-01, 9.40684199e-01, 4.83664215e-01},
-                     {6.55772448e-01, 3.32630165e-02, 3.81309800e-02}}
+                for (std::size_t i = 0; i < array1_size; ++i) {
+                    array1[i] = valueDist(gen);
                 }
+
+                // compute true result
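+                // left-pad dims1 with 1s so both operands have rank nbDims
+                // (trailing-dimension alignment, NumPy-style broadcasting)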
+                auto dims1_tmp = dims1;
+                dims1_tmp.insert(dims1_tmp.cbegin(), nbDims - dims1_tmp.size(), std::size_t(1));
+
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0)
+                                                    + ((dims1_tmp[3] > 1) ? d : 0);
+                                result[idx_out + d] = std::pow(array0[idx0], array1[idx1]);
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1->getImpl()->setRawPtr(array1, array1_size);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres->getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myPow->forward();
+                end = std::chrono::system_clock::now();
+                duration += end - start; // keep sub-microsecond precision (duration has a double rep)
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> myPow = Pow();
-        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
-        op->associateInput(0, input_1);
-        op->associateInput(1, input_2);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myPow->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 54; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
     }
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_ReduceMeanImpl.cpp b/unit_tests/operator/Test_ReduceMeanImpl.cpp
index 494b7a6ace17173ef7b956bc9dabf4d27e665e5a..d9bf68b78d1ece371cbfb5cda3c502f82eaf97de 100644
--- a/unit_tests/operator/Test_ReduceMeanImpl.cpp
+++ b/unit_tests/operator/Test_ReduceMeanImpl.cpp
@@ -17,6 +17,7 @@
 #include "aidge/operator/Conv.hpp"
 
 #include "aidge/backend/cpu.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
 using namespace Aidge;
 
@@ -138,35 +139,60 @@ TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") {
 
     }
     SECTION("all_axes") {
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
-            {
-                {
-                    { 5.0, 1.0 },
-                    { 20.0, 2.0 }
-                },
-                {
-                    { 30.0, 1.0 },
-                    { 40.0, 2.0 }
-                },
+        SECTION("1") {
+            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
                 {
-                    { 55.0, 1.0 },
-                    { 60.0, 2.0 }
+                    {
+                        { 5.0, 1.0 },
+                        { 20.0, 2.0 }
+                    },
+                    {
+                        { 30.0, 1.0 },
+                        { 40.0, 2.0 }
+                    },
+                    {
+                        { 55.0, 1.0 },
+                        { 60.0, 2.0 }
+                    }
                 }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
-            {18.25}
-        });
+            });
+            std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
+                {18.25}
+            });
 
-        std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0);
-        auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
-        op->associateInput(0,myInput);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myReduceMean->forward();
-        op->getOutput(0)->print();
+            std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0);
+            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean->getOperator());
+            op->associateInput(0,myInput);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->computeOutputDims();
+            myReduceMean->forward();
+            op->getOutput(0)->print();
 
-        REQUIRE(*(op->getOutput(0)) == *myOutput);
+            REQUIRE(*(op->getOutput(0)) == *myOutput);
+        }
+        SECTION("2") {
+            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<float,5,4> {
+               {{ 0.004232f, 0.105120f, 0.045124f, 0.009205f},
+                { 0.000766f, 0.272162f, 0.503560f, 0.044163f},
+                { 0.049755f, 0.000305f, 0.143634f, 0.013253f},
+                { 0.096258f, 0.311231f, 0.358143f, 0.000452f},
+                { 0.468617f, 0.015693f, 0.145316f, 0.000105f}}
+            });
+            std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
+                {0.1293547f}
+            });
+
+            std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1}, 0);
+            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean->getOperator());
+            op->associateInput(0,myInput);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->computeOutputDims();
+            myReduceMean->forward();
+            op->getOutput(0)->print();
+            // float accumulation makes the mean slightly inexact, so compare with approxEq rather than operator==
+            REQUIRE(approxEq<float>(*(op->getOutput(0)), *myOutput));
+        }
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SubImpl.cpp b/unit_tests/operator/Test_SubImpl.cpp
index dfd64078b77a557e07eb11cb958ac24eeb1f9aa3..f9ba894f081b76b3abd0f0909636a38eaee3601a 100644
--- a/unit_tests/operator/Test_SubImpl.cpp
+++ b/unit_tests/operator/Test_SubImpl.cpp
@@ -10,123 +10,307 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Sub.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
-#include "aidge/backend/cpu.hpp"
+namespace Aidge {
 
-#include <memory>
+TEST_CASE("[cpu/operator] Sub", "[Sub][CPU]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // random floats between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+    std::uniform_int_distribution<int> boolDist(0,1);
 
-using namespace Aidge;
+    // Create Sub Operator
+    std::shared_ptr<Node> mySub = Sub();
+    auto op = std::static_pointer_cast<OperatorTensor>(mySub->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0, T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op->associateInput(1, T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
+    Tres->setDataType(DataType::Float32);
+    Tres->setBackend("cpu");
+
+    // To measure execution time of 'Sub_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+
+    SECTION("SubImpl_cpu::forward()") {
+        SECTION("Scalar / Scalar") {
 
-TEST_CASE("[cpu/operator] Sub(forward)", "[Sub][CPU]") {
-    SECTION("2D Tensor by Singleton") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.34234560, 0.92812711},
-                {0.73706615, 0.69953883}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{2.5}});
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {-2.15765429, -1.57187295},
-                {-1.76293385, -1.80046117}
-            }
-        });
-
-        std::shared_ptr<Node> mySub = Sub();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
-        mySub->getOperator()->associateInput(0, input_1);
-        mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->setDataType(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        mySub->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
         }
+        SECTION("Scalar / +1-D Tensor") {
 
-    }
+        }
+        SECTION("+1-D Tensor / +1-D Tensor - same dimensions") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("2D Tensors") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {0.34234560, 0.92812711},
-                {0.73706615, 0.69953883}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
-            {
-                {0.61729127, 0.83004373},
-                {0.72002089, 0.52473849}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
-            {
-                {-0.27494568,  0.09808338},
-                {0.01704526,  0.17480034}
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+
+                // without broadcasting
+                float* array0 = new float[nb_elements];
+                float* array1 = new float[nb_elements];
+                float* result = new float[nb_elements];
+
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                    array1[i] = valueDist(gen);
+                    result[i] = array0[i] - array1[i];
+                }
+
+                // input0
+                T0->resize(dims);
+                T0->getImpl()->setRawPtr(array0, nb_elements);
+
+                // input1
+                T1->resize(dims);
+                T1->getImpl()->setRawPtr(array1, nb_elements);
+
+                // results
+                Tres->resize(dims);
+                Tres->getImpl()->setRawPtr(result, nb_elements);
+
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                mySub->forward();
+                end = std::chrono::system_clock::now();
+                duration += end - start; // keep sub-microsecond precision (duration has a double rep)
+
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
             }
-        });
-
-        std::shared_ptr<Node> mySub = Sub();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
-        mySub->getOperator()->associateInput(0, input_1);
-        mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->setDataType(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        mySub->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 4; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
 
-    }
+        SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
+            std::size_t number_of_operation = 0;
 
-    SECTION("3D Tensor by 1D Tensor") {
-        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.84181279, 0.20655948, 0.09750116},
-                 {0.37723488, 0.73120135, 0.04666907}},
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // handle dimensions, replace some dimensions with '1' to get broadcasting
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                std::vector<std::size_t> dims0 = dims;
+                std::vector<std::size_t> dims1 = dims;
+                std::vector<std::size_t> dimsOut = dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims0[i] = 1;
+                    }
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                    dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+                }
 
-                {{0.91483921, 0.93985939, 0.58823180},
-                 {0.39963132, 0.67879969, 0.33209187}}
-            }
-        });
-        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
-            {0.04784805, 0.91903114, 0.38606840}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
-            {
-                {{0.79396474, -0.71247166, -0.28856725},
-                 {0.32938683, -0.18782979, -0.33939934}},
-
-                {{0.86699116,  0.02082825,  0.20216340},
-                 {0.35178328, -0.24023145, -0.05397654}}
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+                    array1[i] = valueDist(gen);
+                }
+
+                // compute true result
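+                // Row-major strides for each input; below, any dimension of extent 1
+                // is pinned to index 0, which implements the broadcasting rule.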
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                                    + ((dims1[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] - array1[idx1];
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1->getImpl()->setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres->getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
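+                // the raw buffers are freed manually after the check below, which
+                // assumes setRawPtr() leaves ownership of the memory to the caller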
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                mySub->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
             }
-        });
-
-        std::shared_ptr<Node> mySub = Sub();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
-        mySub->getOperator()->associateInput(0, input_1);
-        mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->setDataType(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
-        op->computeOutputDims();
-        mySub->forward();
-
-        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
-        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
-        for (std::size_t i = 0; i< 12; ++i) {
-            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
         }
+        SECTION("+1-D Tensor / 1-D Tensor") {
+            std::size_t number_of_operation = 0;
+            std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(std::size_t(1), std::size_t(3));
+
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate 2 random Tensors
+                // draw a random 4-D shape for the full-rank operand
+                constexpr std::size_t nbDims = 4;
+                std::vector<std::size_t> dims0(nbDims);
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims0[i] = dimSizeDist(gen);
+                }
+                std::vector<std::size_t> dimsOut = dims0;
+                std::vector<std::size_t> dims1 = dims0;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    if (boolDist(gen)) {
+                        dims1[i] = 1;
+                    }
+                }
+                dims1.erase(dims1.cbegin(), dims1.cbegin() + nbRemovedDimsDist(gen));
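+                // drop 1 to 3 leading axes so the second operand has lower rank,
+                // e.g. {a, b, c, d} -> {c, d}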
+
+                // create arrays and fill them with random values
+                float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+                std::size_t array1_size = std::accumulate(dims1.cbegin(), dims1.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                float* array1 = new float[array1_size];
+                float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+                for (std::size_t i = 0; i < (dims0[0]*dims0[1]*dims0[2]*dims0[3]); ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                for (std::size_t i = 0; i < array1_size; ++i) {
+                    array1[i] = valueDist(gen);
+                }
 
+                // compute true result
+                auto dims1_tmp = dims1;
+                dims1_tmp.insert(dims1_tmp.cbegin(), nbDims - dims1_tmp.size(), std::size_t(1));
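+                // left-pad the lower-rank shape with 1s back to 4-D so the same
+                // stride-based reference loop applies: broadcasting aligns shapes
+                // on their trailing axes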
+
+                const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+                const std::size_t strides1[nbDims] = {dims1_tmp[1]*dims1_tmp[2]*dims1_tmp[3], dims1_tmp[2]*dims1_tmp[3], dims1_tmp[3], 1};
+                for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+                    for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                        const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                                    + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                        const std::size_t idx1_0 = strides1[0] * ((dims1_tmp[0] > 1) ? a : 0)
+                                                    + strides1[1] * ((dims1_tmp[1] > 1) ? b : 0);
+                        for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                            const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                            for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                                std::size_t idx0 = idx0_0
+                                                    + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                                    + ((dims0[3] > 1) ? d : 0);
+                                std::size_t idx1 = idx1_0
+                                                    + strides1[2] * ((dims1_tmp[2] > 1) ? c : 0)
+                                                    + ((dims1_tmp[3] > 1) ? d : 0);
+                                result[idx_out + d] = array0[idx0] - array1[idx1];
+                            }
+                        }
+                    }
+                }
+
+                // conversion to Aidge::Tensors
+                // input0
+                T0->resize(dims0);
+                T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+                // input1
+                T1->resize(dims1);
+                T1->getImpl()->setRawPtr(array1, array1_size);
+
+                // results
+                Tres->resize(dimsOut);
+                Tres->getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+                // compute result
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                mySub->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // comparison between truth and computed result
+                REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
+
+                delete[] array0;
+                delete[] array1;
+                delete[] result;
+
+                const std::size_t nb_elements = std::accumulate(dimsOut.cbegin(), dimsOut.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+            }
+
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
+        }
     }
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/unit_tests/recipies/Test_ExplicitCastMove.cpp b/unit_tests/recipies/Test_ExplicitCastMove.cpp
index 7d169ba9ba949ead0bf96f80e53a47e1ca6c24d9..27c788961b787c6f5248254f19ef7ac7a4366206 100644
--- a/unit_tests/recipies/Test_ExplicitCastMove.cpp
+++ b/unit_tests/recipies/Test_ExplicitCastMove.cpp
@@ -11,7 +11,7 @@
 
 #include <catch2/catch_test_macros.hpp>
 
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/graph/OpArgs.hpp"
diff --git a/unit_tests/recipies/Test_FuseBatchNorm.cpp b/unit_tests/recipies/Test_FuseBatchNorm.cpp
index c4b3bf18a5f5b68d0e41b9cd40966790a0cf7ff6..82eec7f0c248b51b8447706168675f19116dbdf8 100644
--- a/unit_tests/recipies/Test_FuseBatchNorm.cpp
+++ b/unit_tests/recipies/Test_FuseBatchNorm.cpp
@@ -18,14 +18,14 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 
 #include "aidge/data/Tensor.hpp"
 
 namespace Aidge {
 
-TEST_CASE("[core/recipies] FuseBatchNorm", "[recipies][FuseBatchNorm]") {
+TEST_CASE("[core/recipes] FuseBatchNorm", "[recipes][FuseBatchNorm]") {
     auto myProd = Producer({2, 3, 3, 3}, "dataProvider");
     auto myConv = Conv(3, 3, {1, 1}, "conv1");
     auto myBN = BatchNorm<2>(32, 1.0e-5F, 0.1F, "batchnorm1");
@@ -86,14 +86,11 @@ TEST_CASE("[core/recipies] FuseBatchNorm", "[recipies][FuseBatchNorm]") {
     myBNOp -> setInput(4, std::make_shared<Tensor>(Array1D<float,3> {{0.4470, 0.3064, 0.7061}}));
 
     auto g1 = Sequential({
+        myProd,
         myConv,
         myBN
     });
     g1 -> setName("fuseBNGraph");
-    myProd -> addChild(myConv); // set graph input
-
-    myProdOp -> setDataType(DataType::Float32);
-    myProdOp -> setBackend("cpu");
     g1 -> compile("cpu", DataType::Float32);
 
     auto s = SequentialScheduler(g1);
@@ -107,7 +104,7 @@ TEST_CASE("[core/recipies] FuseBatchNorm", "[recipies][FuseBatchNorm]") {
     std::shared_ptr<Tensor> res2 = std::make_shared<Tensor>(*(myConvOp -> getOutput(0)));
 
     REQUIRE(g1 -> outputNodes().size() == 1);
-    REQUIRE(g1 -> inputNodes().size() == 1);
+    REQUIRE(g1 -> inputNodes().size() == 0);
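+    // with the Producer now inside the GraphView, no dangling data input is
+    // left, hence the graph reports zero input nodes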
     bool eq = true;
     for (std::size_t i = 0; i < res1->size(); ++i) {
          eq &= std::abs(res1->get<float>(i) - res2->get<float>(i)) < 1.0e-06;
diff --git a/unit_tests/recipies/Test_HorizontalTiling.cpp b/unit_tests/recipies/Test_HorizontalTiling.cpp
index 268d94cc55821c41f9c3d4a8451b5730ecaf1bd0..5141e4386d46c181a1adc6f65c4820a60fafed85 100644
--- a/unit_tests/recipies/Test_HorizontalTiling.cpp
+++ b/unit_tests/recipies/Test_HorizontalTiling.cpp
@@ -16,14 +16,14 @@
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ReLU.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/operator/Concat.hpp"
 
 
 namespace Aidge {
 
-TEST_CASE("[core/recipies] Tiling(transformation)", "[Tiling][Recipies]") {
+TEST_CASE("[core/recipes] Tiling(transformation)", "[Tiling][Recipes]") {
 
     SECTION("Transform a pre-generated GraphView") {
 
diff --git a/unit_tests/scheduler/Test_CastMove.cpp b/unit_tests/scheduler/Test_CastMove.cpp
index a52b2b06901818f01117273d181d5d5388348f95..1c46ee3b760644b1aa71a75900a1c198660cfa43 100644
--- a/unit_tests/scheduler/Test_CastMove.cpp
+++ b/unit_tests/scheduler/Test_CastMove.cpp
@@ -19,7 +19,7 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 #include "aidge/backend/cpu.hpp"
 
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index 70fa1913319dbb6c0caf5bdbd18440324802d118..600a1e1c839cd00d36aeb2c90426720f5868f33d 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -206,7 +206,146 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
     SECTION("Test Residual graph") {
     }
 
-    SECTION("Test Recurrent graph") {}
+    SECTION("Test Recurrent graph") {
+        std::shared_ptr<Tensor> in = std::make_shared<Tensor>(
+                Array2D<int, 2, 3>{{{1, 2, 3}, {4, 5, 6}}});
+        std::shared_ptr<Tensor> initTensor = std::make_shared<Tensor>(
+                Array2D<int, 2, 3>{{{0, 0, 0}, {1, 1, 1}}});
+        std::shared_ptr<Tensor> biasTensor = std::make_shared<Tensor>(
+                Array2D<int, 2, 3>{{{2, 0, 0}, {1, 0, 0}}});
+
+        auto add1 = Add(2, "add1");
+        auto mem = Memorize(3, "mem1");
+        auto add2 = Add(2, "add2");
+        auto bias = Producer(biasTensor, "bias");
+        auto init = Producer(initTensor, "init");
+        auto input = Producer(in, "input");
+
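+        // wire the recurrence by hand: init seeds mem's state input (#1), mem's
+        // delayed output (#1) feeds back into add1, and bias joins add2;
+        // Memorize(3) presumably bounds the loop to three scheduling steps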
+        std::shared_ptr<GraphView> g = Sequential({add1, mem, add2});
+        init->addChild(mem, 0, 1);
+        mem->addChild(add1, 1, 1);
+        bias->addChild(add2, 0, 1);
+        input->addChild(add1, 0, 0);
+        // Update GraphView inputs/outputs following previous connections:
+        g->add({mem, add1, add2, init, bias, input});
+
+        g->setBackend("cpu");
+        g->setDataType(Aidge::DataType::Int32);
+        g->save("graphRecurrent");
+        g->forwardDims();
+        SequentialScheduler scheduler(g);
+        REQUIRE_NOTHROW(scheduler.forward(true, true));
+        scheduler.saveSchedulingDiagram("schedulingRecurrent");
+
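+        // hand-computed expectation with mem_0 = init and add1_k = in + mem_(k-1),
+        // over three steps: mem_3 = {{3, 6, 9}, {13, 16, 19}}, so
+        // add2 = mem_3 + bias = {{5, 6, 9}, {14, 16, 19}}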
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(
+                Array2D<int, 2, 3>{{{5, 6, 9}, {14, 16, 19}}});
+        std::shared_ptr<Tensor> result =
+                std::static_pointer_cast<Tensor>(g->getNode("add2")->getOperator()->getRawOutput(0));
+        result->print();
+        expectedOutput->print();
+        bool equal = (*result == *expectedOutput);
+        REQUIRE(equal);
+    }
+
+    SECTION("Test ConnectInput graph") {
+        std::shared_ptr<GraphView> g =
+                Sequential({
+                    Conv(1, 3, {3, 3}, "conv1"),
+                    Conv(3, 4, {1, 1}, "conv2"),
+                    Conv(4, 3, {1, 1}, "conv3"),
+                    FC(27, 5, false, "fc")});
+
+        // g->getNode("conv1")->getOperator()->setInput(0, inputTensor);
+        g->getNode("conv1")->getOperator()->setInput(1, weight1);
+        g->getNode("conv1")->getOperator()->setInput(2, bias1);
+
+        std::shared_ptr<Tensor> weight2 =
+                std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}},
+                                                                   {{{4}}, {{5}}, {{6}}},
+                                                                   {{{7}}, {{8}}, {{9}}},
+                                                                   {{{10}}, {{11}}, {{12}}}}});
+        std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}});
+        g->getNode("conv2")->getOperator()->setInput(1, weight2);
+        g->getNode("conv2")->getOperator()->setInput(2, bias2);
+
+        std::shared_ptr<Tensor> weight3 = std::make_shared<Tensor>(
+                Array4D<int, 3, 4, 1, 1>{{{{{1}}, {{2}}, {{3}}, {{4}}},
+                                          {{{5}}, {{6}}, {{7}}, {{8}}},
+                                          {{{9}}, {{10}}, {{11}}, {{12}}}}});
+        std::shared_ptr<Tensor> bias3 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
+        g->getNode("conv3")->getOperator()->setInput(1, weight3);
+        g->getNode("conv3")->getOperator()->setInput(2, bias3);
+
+        std::shared_ptr<Tensor> weightfc = std::make_shared<Tensor>(
+                Array2D<int, 5, 27>{{{1,  2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+                                      15, 1, 2, 3, 4, 5, 6, 7, 8, 9,  10, 11, 12},
+                                     {13, 14, 15, 1,  2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+                                      12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+                                     {10, 11, 12, 13, 14, 15, 1,  2, 3, 4, 5, 6, 7, 8,
+                                      9,  10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6},
+                                     {7, 8, 9, 10, 11, 12, 13, 14, 15, 1,  2, 3, 4, 5,
+                                      6, 7, 8, 9,  10, 11, 12, 13, 14, 15, 1, 2, 3},
+                                     {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2,
+                                      3, 4, 5, 6, 7, 8, 9,  10, 11, 12, 13, 14, 15}}});
+        std::shared_ptr<Tensor> biasfc = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
+        g->getNode("fc")->getOperator()->setInput(1, weightfc);
+        g->getNode("fc")->getOperator()->setInput(2, biasfc);
+
+        g->setDataType(Aidge::DataType::Int32);
+        g->setBackend("cpu");
+        std::vector<std::vector<Aidge::DimSize_t>> dims = {inputTensor->dims()};
+        g->forwardDims(dims);
+        SequentialScheduler scheduler(g);
+
+        std::vector<std::shared_ptr<Aidge::Tensor>> dataIn = {inputTensor};
+        REQUIRE_NOTHROW(scheduler.forward(true, false, dataIn));
+
+        scheduler.saveSchedulingDiagram("schedulingSequential");
+
+        std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
+                {{{{367, 412, 457}, {592, 637, 682}, {817, 862, 907}},
+                  {{854, 980, 1106}, {1484, 1610, 1736}, {2114, 2240, 2366}},
+                  {{1341, 1548, 1755}, {2376, 2583, 2790}, {3411, 3618, 3825}}},
+                 {{{1492, 1537, 1582}, {1717, 1762, 1807}, {1942, 1987, 2032}},
+                  {{4004, 4130, 4256}, {4634, 4760, 4886}, {5264, 5390, 5516}},
+                  {{6516, 6723, 6930}, {7551, 7758, 7965}, {8586, 8793, 9000}}}}});
+
+        std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int, 2, 4, 3, 3>{
+                {{{{6099, 7017, 7935}, {10689, 11607, 12525}, {15279, 16197, 17115}},
+                  {{13786, 15838, 17890}, {24046, 26098, 28150}, {34306, 36358, 38410}},
+                  {{21473, 24659, 27845}, {37403, 40589, 43775}, {53333, 56519, 59705}},
+                  {{29160, 33480, 37800}, {50760, 55080, 59400}, {72360, 76680, 81000}}},
+                 {{{29049, 29967, 30885}, {33639, 34557, 35475}, {38229, 39147, 40065}},
+                  {{65086, 67138, 69190}, {75346, 77398, 79450}, {85606, 87658, 89710}},
+                  {{101123, 104309, 107495}, {117053, 120239, 123425}, {132983, 136169, 139355}},
+                  {{137160, 141480, 145800}, {158760, 163080, 167400}, {180360, 184680, 189000}}}}});
+
+        std::shared_ptr<Tensor> expectedOutput3 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
+                {{{{214731, 246591, 278451}, {374031, 405891, 437751}, {533331, 565191, 597051}},
+                  {{496804, 570568, 644332}, {865624, 939388, 1013152}, {1234444, 1308208, 1381972}},
+                  {{778877, 894545, 1010213}, {1357217, 1472885, 1588553}, {1935557, 2051225, 2166893}}},
+                 {{{1011231, 1043091, 1074951}, {1170531, 1202391, 1234251}, {1329831, 1361691, 1393551}},
+                  {{2340904, 2414668, 2488432}, {2709724, 2783488, 2857252}, {3078544, 3152308, 3226072}},
+                  {{3670577, 3786245, 3901913}, {4248917, 4364585, 4480253}, {4827257, 4942925, 5058593}}}}});
+
+        Tensor expectedOutput4 = Array2D<int, 2, 5>{
+                {{205050376, 198925904, 181355097, 196978090, 238868348},
+                 {598467376, 561797804, 560823897, 593043790, 698672948}}};
+        std::shared_ptr<Tensor> other1 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv1")->getOperator())->getOutput(0);
+        bool equal1 = (*other1 == *expectedOutput1);
+        REQUIRE(equal1);
+        std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0);
+        bool equal2 = (*other2 == *expectedOutput2);
+        REQUIRE(equal2);
+        std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0);
+        bool equal3 = (*other3 == *expectedOutput3);
+        REQUIRE(equal3);
+        std::shared_ptr<Tensor> other4 = std::static_pointer_cast<OperatorTensor>(g->getNode("fc")->getOperator())->getOutput(0);
+        bool equal4 = (*other4 == expectedOutput4);
+        REQUIRE(equal4);
+    }
 }
 
 TEST_CASE("[cpu/scheduler] SequentialScheduler(backward)", "[scheduler][backward]") {
@@ -230,5 +369,5 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(backward", "[scheduler][backward
     gv -> compile("cpu", DataType::Int32);
     compile_gradient(gv);
     SequentialScheduler scheduler(gv);
-    scheduler.backward();
-}
\ No newline at end of file
+    scheduler.backward();
+}