diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 3efb308fa0f78dce35973ccb47d1303d7c8634af..420442101a3892683f52e28e3bc9c8022abbcab5 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -15,7 +15,7 @@ stages:
 
 include:
   - local: '/.gitlab/ci/_global.gitlab-ci.yml'
-  - local: '/.gitlab/ci/static_analysis.gitlab-ci.yml'
+  # - local: '/.gitlab/ci/static_analysis.gitlab-ci.yml'
   - local: '/.gitlab/ci/build.gitlab-ci.yml'
   - local: '/.gitlab/ci/test.gitlab-ci.yml'
-  - local: '/.gitlab/ci/coverage.gitlab-ci.yml'
+  # - local: '/.gitlab/ci/coverage.gitlab-ci.yml'
diff --git a/aidge_backend_cpu/unit_tests/test_recipies.py b/aidge_backend_cpu/unit_tests/test_recipies.py
index 60949adf245f4f4a7ed316879fb307131f70739a..841c15590e9dac7596958b8392c99948978723c5 100644
--- a/aidge_backend_cpu/unit_tests/test_recipies.py
+++ b/aidge_backend_cpu/unit_tests/test_recipies.py
@@ -49,24 +49,24 @@ class test_recipies(unittest.TestCase):
         np_shift = np.array([0.05]).astype(np.float32)
         np_mean = np.array([0.05]).astype(np.float32)
         np_var = np.array([0.05]).astype(np.float32)
-        conv.input(1)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_weights))
-        conv.input(2)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_bias))
-        bn.input(1)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_scale))
-        bn.input(2)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_shift))
-        bn.input(3)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_mean))
-        bn.input(4)[0].get_operator().set_output_tensor(aidge_core.Tensor(np_var))
+        conv.input(1)[0].get_operator().set_output(0, aidge_core.Tensor(np_weights))
+        conv.input(2)[0].get_operator().set_output(0, aidge_core.Tensor(np_bias))
+        bn.input(1)[0].get_operator().set_output(0, aidge_core.Tensor(np_scale))
+        bn.input(2)[0].get_operator().set_output(0, aidge_core.Tensor(np_shift))
+        bn.input(3)[0].get_operator().set_output(0, aidge_core.Tensor(np_mean))
+        bn.input(4)[0].get_operator().set_output(0, aidge_core.Tensor(np_var))
         scheduler0 = aidge_core.SequentialScheduler(graph_view)
         scheduler0.forward()
 
         for outNode in graph_view.get_output_nodes():
-            output_aidge0 = outNode.get_operator().output(0)
+            output_aidge0 = outNode.get_operator().get_output(0)
 
         aidge_core.fuse_batchnorm(graph_view)
         scheduler1 = aidge_core.SequentialScheduler(graph_view)
         scheduler1.forward()
 
         for outNode in graph_view.get_output_nodes():
-            output_aidge1 = outNode.get_operator().output(0)
+            output_aidge1 = outNode.get_operator().get_output(0)
 
         self.assertTrue(aidge_core.approx_eq(output_aidge0, output_aidge1, 0.000001, 0.0001))
 
diff --git a/aidge_backend_cpu/unit_tests/test_scheduler.py b/aidge_backend_cpu/unit_tests/test_scheduler.py
index 3449ff513ef618e24788419c835b7277a1e751f1..2f174efed32fc814010ff61cd42c1bae1105674e 100644
--- a/aidge_backend_cpu/unit_tests/test_scheduler.py
+++ b/aidge_backend_cpu/unit_tests/test_scheduler.py
@@ -22,30 +22,30 @@ class test_scheduler(unittest.TestCase):
         gv.add(relu)
         gv.add(input_node)
 
+        input_node.add_child(relu)
+
         gv.set_datatype(aidge_core.DataType.Int32)
         gv.set_backend("cpu")
 
-        input_node.add_child(relu)
-
         scheduler = aidge_core.SequentialScheduler(gv)
 
         scheduler.forward()
 
-        out_tensor = relu.get_operator().output(0)
+        out_tensor = relu.get_operator().get_output(0)
         expected_out = [0,0,0,0,1,2]
         for i in range(len(expected_out)):
             self.assertEqual(expected_out[i], out_tensor[i])
 
     def test_sequential_scheduling(self):
-        input_data =  np.array([]).astype(np.float32)
+        input_data =  np.array([0]).astype(np.float32)
         input_tensor = aidge_core.Tensor(input_data)
 
         input_node = aidge_core.Producer(input_tensor, "X")
 
         graph_view = aidge_core.sequential([
-            aidge_core.FC(50, name='0'),
-            aidge_core.FC(50, name='1'),
-            aidge_core.FC(10, name='2'),
+            aidge_core.FC(1, 50, name='0'),
+            aidge_core.FC(50, 50, name='1'),
+            aidge_core.FC(50, 10, name='2'),
         ])
         EXPECTED_SCHEDULE = ['0', '1', '2']
 
@@ -64,14 +64,14 @@ class test_scheduler(unittest.TestCase):
 
 
     def test_parallel_scheduling(self):
-        input_data =  np.array([]).astype(np.float32)
+        input_data =  np.array([0]).astype(np.float32)
         input_tensor = aidge_core.Tensor(input_data)
 
         input_node = aidge_core.Producer(input_tensor, "X")
         graph_view = aidge_core.sequential([
-            aidge_core.FC(50, name='0'),
-            aidge_core.parallel([aidge_core.FC(50, name='1'), aidge_core.FC(50, name='3')]),
-            aidge_core.Add(name='2'),
+            aidge_core.FC(1, 50, name='0'),
+            aidge_core.parallel([aidge_core.FC(50, 50, name='1'), aidge_core.FC(50, 50, name='3')]),
+            aidge_core.Add(2, name='2'),
         ])
 
         EXPECTED_SCHEDULE = [['0', '1', '3', '2'],  ['0', '3', '1', '2']] # Both scheduling are valid !
diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 5a7ac3958b76e94c8389b0287fdac40c8c3a5ad8..f78598057cafe0b5b02d268bd5a73ede5a2981d8 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -17,6 +17,7 @@
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
+#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
@@ -29,6 +30,7 @@
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
+#include "aidge/backend/cpu/operator/SliceImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
new file mode 100644
index 0000000000000000000000000000000000000000..38ea848afc29fa4c23ff500f97e0c57954695021
--- /dev/null
+++ b/include/aidge/backend/cpu/data/GetCPUPtr.h
@@ -0,0 +1,23 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
+#define AIDGE_CPU_DATA_GETCPUPTR_H_
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
+  return std::static_pointer_cast<Tensor>(data)->getImpl()->rawPtr();
+}
+} // namespace Aidge
+
+#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 9dbd21501462c010384248544b81bb9f26346604..fa1b837902ee72f22c54afdec0ff897db3b39b76 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Add.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
@@ -23,87 +24,39 @@ namespace Aidge {
 // class Add_Op<2>;
 
 // compute kernel registry for forward and backward
-template <DimIdx_t NUM>
-class AddImplForward_cpu;
-template <DimIdx_t NUM>
-class AddImplBackward_cpu;
-
-template <>
-class AddImplForward_cpu<1>
-    : public Registrable<AddImplForward_cpu<1>, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {};
-template <>
-class AddImplBackward_cpu<1>
-    : public Registrable<AddImplBackward_cpu<1>, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {};
-
-template <>
-class AddImplForward_cpu<2> : public Registrable<AddImplForward_cpu<2>, std::tuple<DataType, DataType, DataType>,
-                                                     void(const std::size_t, const void*, const void*, void*)> {};
-template <>
-class AddImplBackward_cpu<2> : public Registrable<AddImplBackward_cpu<2>, std::tuple<DataType, DataType, DataType>,
-                                                      void(const std::size_t, const void*, const void*, void*)> {};
-
-template <>
-class AddImplForward_cpu<3> : public Registrable<AddImplForward_cpu<3>, std::tuple<DataType, DataType, DataType, DataType>,
-                                                     void(const std::size_t, const void*, const void*, const void*, void*)> {
-};
-template <>
-class AddImplBackward_cpu<3>
-    : public Registrable<AddImplBackward_cpu<3>, std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::size_t, const void*, const void*, const void*, void*)> {};
+class AddImplForward_cpu
+    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {};
+
+class AddImplBackward_cpu
+    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const std::vector<const void*>, void*)> {};
+
 
-template <DimIdx_t NUM>
 class AddImpl_cpu : public OperatorImpl {
 public:
-    AddImpl_cpu(const Add_Op<NUM>& op) : OperatorImpl(op) {}
+    AddImpl_cpu(const Add_Op& op) : OperatorImpl(op) {}
 
-    static std::unique_ptr<AddImpl_cpu<NUM>> create(const Add_Op<NUM>& op) {
-        return std::make_unique<AddImpl_cpu<NUM>>(op);
+    static std::unique_ptr<AddImpl_cpu> create(const Add_Op& op) {
+        return std::make_unique<AddImpl_cpu>(op);
     }
-};
 
-template <>
-class AddImpl_cpu<1> : public OperatorImpl {
 public:
-    AddImpl_cpu(const Add_Op<1>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<AddImpl_cpu<1>> create(const Add_Op<1>& op) {
-        return std::make_unique<AddImpl_cpu<1>>(op);
-    }
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
 
     NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
-    void forward() override;
-};
 
-template <>
-class AddImpl_cpu<2> : public OperatorImpl {
-public:
-    AddImpl_cpu(const Add_Op<2>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<AddImpl_cpu<2>> create(const Add_Op<2>& op) {
-        return std::make_unique<AddImpl_cpu<2>>(op);
-    }
+    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
 
-    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
-};
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
 
-template <>
-class AddImpl_cpu<3> : public OperatorImpl {
-public:
-    AddImpl_cpu(const Add_Op<3>& op) : OperatorImpl(op) {}
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
-    static std::unique_ptr<AddImpl_cpu<3>> create(const Add_Op<3>& op) {
-        return std::make_unique<AddImpl_cpu<3>>(op);
-    }
+    void updateConsummerProducer() override final;
 
-    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
     void forward() override;
 };
 
 namespace {
-static Registrar<Add_Op<1>> registrarAddImpl1I_cpu("cpu", Aidge::AddImpl_cpu<1>::create);
-static Registrar<Add_Op<2>> registrarAddImpl2I_cpu("cpu", Aidge::AddImpl_cpu<2>::create);
-static Registrar<Add_Op<3>> registrarAddImpl3I_cpu("cpu", Aidge::AddImpl_cpu<3>::create);
+static Registrar<Add_Op> registrarAddImpl_cpu("cpu", Aidge::AddImpl_cpu::create);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
index 221e36dcfac44e21d1b1a35674ca21403b4b57ab..198bcbacc395edf2709fa229828e2228554e6fd2 100644
--- a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
@@ -18,70 +18,30 @@
 
 namespace Aidge {
 
-template <class I1, class O>
-void AddImpl1I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, void* output_) {
+template <class I, class O>
+void AddImpl_cpu_forward_kernel(const std::size_t inputLength, const std::vector<const void*> inputs_, void* output_) {
     // FIXME: missing Add attributes as arguments
-    const I1* input1 = static_cast<const I1*>(input1_);
-    O* output = static_cast<O*>(output_);
-
-    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
-        output[oIndex] = input1[oIndex];
-    }
-}
-
-template <class I1, class I2, class O>
-void AddImpl2I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_,
-                                      void* output_) {
-    // FIXME: missing Add attributes as arguments
-    const I1* input1 = static_cast<const I1*>(input1_);
-    const I2* input2 = static_cast<const I2*>(input2_);
-    O* output = static_cast<O*>(output_);
-
-    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
-        output[oIndex] = input1[oIndex] + input2[oIndex];
+    std::vector<const I*> inputs;
+    for (const auto& input_ : inputs_) {
+        inputs.push_back(static_cast<const I*>(input_));
     }
-}
-
-template <class I1, class I2, class I3, class O>
-void AddImpl3I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_,
-                                      const void* input3_, void* output_) {
-    // FIXME: missing Add attributes as arguments
-    const I1* input1 = static_cast<const I1*>(input1_);
-    const I2* input2 = static_cast<const I2*>(input2_);
-    const I3* input3 = static_cast<const I3*>(input3_);
     O* output = static_cast<O*>(output_);
 
-    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
-        output[oIndex] = input1[oIndex] + input2[oIndex] + input3[oIndex];
-    }
+    for (std::size_t oIndex = 0; oIndex < inputLength; ++oIndex) {
+        output[oIndex] = inputs[0][oIndex];
+        for (std::size_t iIndex = 1; iIndex < inputs.size(); ++iIndex)
+            output[oIndex] += inputs[iIndex][oIndex];
+    }
 }
 
 namespace {
-static Registrar<AddImplForward_cpu<1>> registrarAddImpl1IForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::AddImpl1I_cpu_forward_kernel<float, float>);
-static Registrar<AddImplForward_cpu<1>> registrarAddImpl1IForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::AddImpl1I_cpu_forward_kernel<int, int>);
-static Registrar<AddImplForward_cpu<1>> registrarAddImpl1IForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::AddImpl1I_cpu_forward_kernel<double, double>);
-
-static Registrar<AddImplForward_cpu<2>> registrarAddImpl2IForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::AddImpl2I_cpu_forward_kernel<float, float, float>);
-static Registrar<AddImplForward_cpu<2>> registrarAddImpl2IForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32}, Aidge::AddImpl2I_cpu_forward_kernel<int, int, int>);
-static Registrar<AddImplForward_cpu<2>> registrarAddImpl2IForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64}, Aidge::AddImpl2I_cpu_forward_kernel<double, double, double>);
-
-static Registrar<AddImplForward_cpu<3>> registrarAddImpl3IForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::AddImpl3I_cpu_forward_kernel<float, float, float, float>);
-static Registrar<AddImplForward_cpu<3>> registrarAddImpl3IForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::AddImpl3I_cpu_forward_kernel<int, int, int, int>);
-static Registrar<AddImplForward_cpu<3>> registrarAddImpl3IForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::AddImpl3I_cpu_forward_kernel<double, double, double, double>);
+static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::AddImpl_cpu_forward_kernel<float, float>);
+static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::AddImpl_cpu_forward_kernel<int, int>);
+static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::AddImpl_cpu_forward_kernel<double, double>);
 }  // namespace
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_ADDIMPL_CPU_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index e3c3a6a28b08386a3b93702f8ce64df68f703119..bfb2b1947281fc30e38fd1fe1663bd5de415d3ee 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class AvgPooling_Op;
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
index ea46a540ad04b6227d6ec01c965e2eb99806d5e1..5598cc9cdfd463b6e40e6801b74203b911a318e6 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/data/Data.hpp"
 #include <array>
 #include <tuple>
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index 060e19b135c12832e8a7e8cc9c0db828d4a204d1..a599aeb7b427161eb7541829242820c0306d0d31 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class BatchNorm_Op;
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
index 486829e782ae2173332a7efa6646bb7bba322252..cfde6ebe7cab8cfe2f793723983c8552bd9747b8 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <array>
 #include <cmath>
 #include <algorithm>
diff --git a/include/aidge/backend/cpu/operator/ConcatImpl.hpp b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d0d3e06365c524da1af485583dda6d6208ef3fb9
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
@@ -0,0 +1,73 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_CONCATIMPL_H_
+#define AIDGE_CPU_OPERATOR_CONCATIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Concat.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Concat_Op<2>;
+
+// compute kernel registry for forward and backward
+class ConcatImplForward_cpu
+    : public Registrable<ConcatImplForward_cpu, std::tuple<DataType, DataType>, void(const Concat_Op::Attrs&,
+                                                                                     const std::vector<DimSize_t>,
+                                                                                     const std::vector<DimSize_t>&,
+                                                                                     const std::vector<const void*>,
+                                                                                     void*)> {};
+
+class ConcatImplBackward_cpu
+    : public Registrable<ConcatImplBackward_cpu, std::tuple<DataType, DataType>, void(const Concat_Op::Attrs&,
+                                                                                     const std::vector<DimSize_t>,
+                                                                                     const std::vector<DimSize_t>&,
+                                                                                     const std::vector<const void*>,
+                                                                                     void*)> {};
+
+
+class ConcatImpl_cpu : public OperatorImpl {
+public:
+    ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<ConcatImpl_cpu> create(const Concat_Op& op) {
+        return std::make_unique<ConcatImpl_cpu>(op);
+    }
+
+public:
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
+
+    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
+
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+
+    void updateConsummerProducer() override final;
+
+    void forward() override;
+
+    void backward() override;
+};
+
+namespace {
+static Registrar<Concat_Op> registrarConcatImpl_cpu("cpu", Aidge::ConcatImpl_cpu::create);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_CONCATIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ed849b0e1cdb5089275784dea418c832a38dfe66
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp
@@ -0,0 +1,79 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_
+
+#include <algorithm>
+#include <numeric>
+#include <cstddef>
+#include <vector>
+
+#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/operator/Concat.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+namespace Aidge {
+
+template <class I, class O>
+void ConcatImpl_cpu_forward_kernel(const Concat_Op::Attrs& attrs,
+                                   const std::vector<DimSize_t>& dimsFirstInput,
+                                   const std::vector<DimSize_t>& concatAxisValues,
+                                   const std::vector<const void*>& inputs_,
+                                   void* output_)
+{
+    // FIXME: missing Concat attributes as arguments
+    std::vector<const I*> inputs;
+    for (const auto& input_ : inputs_) {
+        inputs.push_back(static_cast<const I*>(input_));
+    }
+    O* output = static_cast<O*>(output_);
+
+    DimSize_t outputAxisValue = std::accumulate(concatAxisValues.begin(), concatAxisValues.end(), DimSize_t(0));
+
+    DimSize_t prodDimLower = 1;
+    for (DimIdx_t i = 0; i < std::get<0>(attrs); ++i) {
+        prodDimLower *= dimsFirstInput[i];
+    }
+    DimSize_t prodDimHigher = 1;
+    for (DimIdx_t i = std::get<0>(attrs) + 1; static_cast<std::size_t>(i) < dimsFirstInput.size();
+         ++i) {
+        prodDimHigher *= dimsFirstInput[i];
+    }
+
+    std::size_t oIndexStart = 0;
+    std::size_t oIndex = 0;
+    for (std::size_t inputId = 0; inputId < inputs.size(); ++inputId) {
+        oIndex = oIndexStart;
+        const DimSize_t iOffset = prodDimHigher*concatAxisValues[inputId];
+        for (std::size_t iIndex = 0; iIndex < prodDimLower; ++iIndex) {
+            std::copy(inputs[inputId] + iIndex*iOffset, inputs[inputId] + (iIndex+1)*iOffset, output + oIndex);
+            oIndex += prodDimHigher*outputAxisValue;
+        }
+        oIndexStart += concatAxisValues[inputId]*prodDimHigher;
+    }
+}
+
+namespace {
+static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ConcatImpl_cpu_forward_kernel<float, float>);
+static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ConcatImpl_cpu_forward_kernel<int, int>);
+static Registrar<ConcatImplForward_cpu> registrarConcatImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64},
+        Aidge::ConcatImpl_cpu_forward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_CONCATIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index 7b5dbfb0801fb314d91da15c8a9c4b80fe62eb35..f72890d8903ca4a9876809759587ed4b1ac22e67 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class ConvDepthWise_Op;
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
index 5aa29ac55740d46bba873bb9d85a04cd004cc3bd..95a1aaeccbe728eb2bb957913a5b79f4b8a9548b 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
@@ -16,7 +16,9 @@
 
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <cmath>
+#include <cstddef>
 #include <array>
 #include <algorithm>
 
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index 3db91ab507456244676c990427287e5755ab019b..9bc2f27412f388a7fd03db06ac97c612044fab5f 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class Conv_Op;
diff --git a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
index 03e2c35170432181c7a9b3934d61f0bd18471876..cbd784698fcce5152c0bb42a192c327abb2b10dd 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <cmath>
 #include <array>
 #include <algorithm>
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 655a9f6c8accb80fc85d8bc7bd9bf378d4f48a6b..73809ee81e26fff23e40763405857ddd2c95db0c 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Div.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index 5d79369077d06288e218b9002274e7e3d1880b59..86bb7fd1271e5857b595dda8efc0354851c94b7e 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 #include <array>
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index 371e2905a81d1dc2e114f6044388d7e6686122f8..4a1da034935e6b1f6c2069b4f91153b77a9f0636 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index 2e4b3157360065b0fa857a8bcdd85f1b7442ee63..e8654c6e9cc8fab9080bbb5ed57ea78ee0b7978c 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -20,6 +20,7 @@
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class MatMul_Op;
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
index a96fcc226b927b135465ef9cf395d10f844a2646..6cde34d9b123b4f83cbfce412ffa62e0144af8d4 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class MaxPooling_Op;
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
index caa99e8678a72c7fd3c77fe8b7579ea739ac64c7..c4baccdee5def0be93be42b5657d77d21240328c 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/data/Data.hpp"
 #include <array>
 #include <tuple>
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
index 54361e4f5f7a361032c9f4928392f18f183724ac..f1b58e59b9ac1d3a1d34162a1054534830b8d508 100644
--- a/include/aidge/backend/cpu/operator/MulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Mul.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index 9d93828f5817043f4f5cb07166db213c02866ca1..2320662710f9802878811e51ec4439bd812aea67 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/Pad.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 // class Pad_Op;
diff --git a/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
index 8b793257d2f3f126793316d463fe2542512da939..f6f00bc4df661921708e605f44056a77bb8125f4 100644
--- a/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <cmath>
 #include <array>
 #include <algorithm>
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index c33fbf0ed4adf4a0206ce8ed32ffdce2cd9ad17c..d3cafa7e7380e31dd331950e381e08210c3f3a4c 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Pow.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/ProducerImpl.hpp b/include/aidge/backend/cpu/operator/ProducerImpl.hpp
index 19361f1903e8737562dba63b24f3410e6eba1e5b..c1d27f7efc4457fd3b02b6cde006401e2ca71661 100644
--- a/include/aidge/backend/cpu/operator/ProducerImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ProducerImpl.hpp
@@ -18,6 +18,7 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
 class ProducerImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index 6596c1c9052ca8f919c3cb2fa7ef5a2fa1f823d4..3338d0c40c057995fe37b1652966241bf4a96b59 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
index e336adb003769afd97770fd3dd65796b5bbf6a2d..bbcb4553d7aa4b17d733e0f455373bebb9c3581c 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 #include <array>
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp
index 8fe13bce3a4c470d77b083603d3b889a46fda71f..df8e1a7e7b02a4ad032d6f09fae3ae2cd8a42eff 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp
@@ -9,14 +9,69 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CPU_OPERATOR_ScalingIMPL_FORWARD_KERNEL_H__
-#define __AIDGE_CPU_OPERATOR_ScalingIMPL_FORWARD_KERNEL_H__
+#ifndef AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_
 
+#include <cmath>
+#include <cstddef>
 #include "aidge/utils/Registrar.hpp"
-
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
 
+// TODO: improve this propagate kernel; see the N2D2 reference implementation below:
+/*
+template<typename T>
+void N2D2::floatingPointScaling_propagate(const Tensor<T>& input, Tensor<T>& output,
+                                          std::size_t batchSize, std::size_t nbChannels,
+                                          std::size_t height, std::size_t width,
+                                          bool isClipped,
+                                          const std::vector<Float_T>& clippingFactorPerChannel,
+                                          const std::vector<Float_T>& scalingFactorPerChannel,
+                                          std::size_t quantizedNbBits, bool isOutputUnsigned)
+{
+    std::size_t index = 0;
+    for (std::size_t batch = 0; batch < batchSize; batch++) {
+        for(std::size_t ch = 0; ch < nbChannels; ch++) {
+            for(std::size_t y = 0; y < height; y++) {
+                for(std::size_t x = 0; x < width; x++) {
+
+                    T res = isClipped ? Clip(input(index), clippingFactorPerChannel[ch])
+                                    : input(index);
+                    res = Scale(res, scalingFactorPerChannel[ch]);
+
+                    if(quantizedNbBits > 0) {
+                        res = saturate(std::round(res), quantizedNbBits, isOutputUnsigned);
+                    }
+                    output(index) = (T) res;
+                    index++;
+                }
+            }
+        }
+    }
+}
+*/
+
+
 namespace Aidge {
+
+template <class O>
+const O& clamp(const O& x, const O& min, const O& max)
+{
+    return (x < min) ? min : (x > max) ? max : x;
+}
+
+template<class O>
+O saturate(const O value, const std::size_t quantizedNbBits, const bool isOutputUnsigned) {
+    // TODO: no assertions in kernel
+    assert(quantizedNbBits > 0);
+
+    const O min = isOutputUnsigned ? 0 :
+                                  -(1ll << (quantizedNbBits - 1ll));
+    const O max = isOutputUnsigned ? (1ll << quantizedNbBits) - 1ll :
+                                   (1ll << (quantizedNbBits - 1ll)) - 1ll;
+
+    return clamp(value, min, max);
+}
+
 template <class I, class O>
 void ScalingImpl_cpu_forward_kernel(const Scaling_Op::Attrs& attrs,
                                      std::size_t inputLenght,
@@ -26,9 +81,15 @@ void ScalingImpl_cpu_forward_kernel(const Scaling_Op::Attrs& attrs,
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
     const I& scalingFactor = static_cast<const I&>(std::get<0>(attrs));
+    const std::size_t quantizedNbBits = static_cast<std::size_t>(std::get<1>(attrs));
+    const bool isOutputUnsigned = static_cast<bool>(std::get<2>(attrs));
 
     for (std::size_t i = 0; i < inputLenght; ++i) {
         output[i] = input[i] * scalingFactor;
+
+        if(quantizedNbBits > 0) {
+                output[i] = saturate(std::round(output[i]), quantizedNbBits, isOutputUnsigned);
+        }
     }
 }
 
@@ -42,4 +103,4 @@ static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Float64
 }  // namespace
 }  // namespace Aidge
 
-#endif /* __AIDGE_CPU_OPERATOR_ScalingIMPL_FORWARD_KERNEL_H__ */
+#endif /* AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/operator/SliceImpl.hpp b/include/aidge/backend/cpu/operator/SliceImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..80e2f0fcef83a369561095f8e55a437f7acc9675
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SliceImpl.hpp
@@ -0,0 +1,71 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SLICEIMPL_H_
+#define AIDGE_CPU_OPERATOR_SLICEIMPL_H_
+
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Slice.hpp"
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// class Slice_Op;
+
+// compute kernel registry for forward and backward
+class SliceImplForward_cpu
+    : public Registrable<SliceImplForward_cpu, std::tuple<DataType>,
+                         void(const typename Slice_Op::Attrs&,
+                              const std::vector<std::size_t>,
+                              const void*,
+                              void*)> {};
+class SliceImplBackward_cpu
+    : public Registrable<SliceImplBackward_cpu, std::tuple<DataType>,
+                         void(const typename Slice_Op::Attrs&,
+                              const std::vector<std::size_t>,
+                              const void*,
+                              void*)> {};
+
+
+class SliceImpl_cpu : public OperatorImpl {
+public:
+    SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<SliceImpl_cpu> create(const Slice_Op& op) {
+        return std::make_unique<SliceImpl_cpu>(op);
+    }
+
+public:
+    NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getRequiredMemory(const IOIndex_t outputIdx,
+                               const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
+
+    void forward() override;
+
+    void backward() override;
+};
+
+
+namespace {
+static Registrar<Slice_Op> registrarSliceImpl_cpu("cpu", Aidge::SliceImpl_cpu::create);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SLICEIMPL_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7eb4b9dc2cb8dddc8b7fdaf4d63b8f1d39d879b0
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SLICEIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SLICEIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Slice.hpp"
+#include "aidge/backend/cpu/operator/SliceImpl.hpp"
+#include <vector>
+#include <cstddef>
+
+#include "aidge/data/Data.hpp"
+
+namespace Aidge {
+template <class I>
+void SliceImpl_cpu_forward_kernel(const typename Slice_Op::Attrs& attrs,
+                                     const std::vector<std::size_t> inputDims,
+                                     const void* input_,
+                                     void* output_) {
+
+    const I* input = static_cast<const I*>(input_) + std::get<0>(attrs);
+    I* output = static_cast<I*>(output_);
+    const std::vector<std::size_t> slicedDims = std::get<1>(attrs);
+    const std::size_t nbDims = slicedDims.size();
+
+	// for inputDims = {4,5,5,3} & slicedDims = {3,2,2,1}, substractedDims = {1,3,3,2}
+    std::vector<std::size_t> substractedDims = std::vector<std::size_t>(nbDims);
+    for (std::size_t i = 0; i < nbDims; ++i) {
+        substractedDims[i] = inputDims[i] - slicedDims[i];
+    }
+
+	// for slicedDims = {3,2,2,1}, prodSlicedDims = {12,4,2,1}
+    std::vector<std::size_t> prodSlicedDims = std::vector<std::size_t>(nbDims);
+    std::vector<std::size_t> prodInputDims = std::vector<std::size_t>(nbDims+1);
+	prodSlicedDims[nbDims - 1] = slicedDims[nbDims - 1];
+	prodInputDims[nbDims - 1] = inputDims[nbDims - 1];
+	prodInputDims[nbDims] = 1;
+	for (std::size_t i = 2; i <= nbDims; ++i) {
+		prodSlicedDims[nbDims - i] = prodSlicedDims[nbDims - i + 1]*slicedDims[nbDims - i];
+		prodInputDims[nbDims - i] = prodInputDims[nbDims - i + 1]*inputDims[nbDims - i];
+	}
+
+	std::size_t j = 0;
+	std::size_t i = 0;
+	for (; j < prodSlicedDims[0];) {
+		output[j] = input[i++];
+        ++j;
+		for (std::size_t idx = nbDims - 1; idx > 0; --idx) {
+			i += j % prodSlicedDims[idx] == 0 ? substractedDims[idx]*prodInputDims[idx+1] : 0;
+		}
+	}
+}
+
+namespace {
+
+// DIM = 1
+static Registrar<SliceImplForward_cpu> registrarSliceImplForward_cpu_Float32(
+        {DataType::Float32}, Aidge::SliceImpl_cpu_forward_kernel<float>);
+static Registrar<SliceImplForward_cpu> registrarSliceImplForward_cpu_Int32(
+        {DataType::Int32}, Aidge::SliceImpl_cpu_forward_kernel<int>);
+static Registrar<SliceImplForward_cpu> registrarSliceImplForward_cpu_Float64(
+        {DataType::Float64}, Aidge::SliceImpl_cpu_forward_kernel<double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SLICEIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index 995f57f7c0168328e1982315358201c9f8940235..15fb2b5d30e32febca7c8028c8b5212e5b96775f 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp
index 297a3a321667dfc8c5a2bb0e3fc3bebce8825950..a5a168a08cf85e952cffd556e0cc34d29d35fffa 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp
@@ -17,6 +17,7 @@
 #include <cmath>
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl.hpp b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
index 1880408cd52f537c6d4965438ece88151d4df6e3..b3723f27b077b9d5ea7e69fd33bd012d02654ffe 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp
index 08ec69e509b2b6c02e30f613abd83208de254f75..2d4c22f0d7f5e850ce805e0c78fb3e64bfa8f42b 100644
--- a/include/aidge/backend/cpu/operator/SubImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl.hpp
@@ -16,6 +16,7 @@
 #include "aidge/operator/Sub.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <memory>
 #include <vector>
 
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 4be0078199671bc09af73a5f9dbfcd0ff2e61bed..91d4533c4ee2754dce1b9b7ea9ca8c598f530a52 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -10,93 +10,75 @@
  ********************************************************************************/
 
 #include <cassert>
-#include <chrono>  // std::chrono::milliseconds
 #include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
 #include <vector>
 
-#include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 
 #include "aidge/backend/cpu/operator/AddImpl.hpp"
 #include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"
 
-//////////////////////////////////
-// AddImpl_cpu<1>
-//////////////////////////////////
+Aidge::NbElts_t Aidge::AddImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return 0;
+    // Requires the whole tensors
+    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->dims();
+    return std::accumulate(inputDims.begin(), inputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
 }
 
-void Aidge::AddImpl_cpu<1>::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-
-    // Find the correct kernel type
-    auto kernelFunc = Registrar<AddImplForward_cpu<1>>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getOutput(0)->dataType()});
-
-    // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+Aidge::NbElts_t  Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation of Add can be in-place: no protected memory is required
+    return 0;
 }
 
+Aidge::NbElts_t  Aidge::AddImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+    // Requires the whole tensors, regardless of available data on inputs
+    assert(outputIdx == 0 && "operator has only one output");
+    (void) outputIdx;
 
-//////////////////////////////////
-// AddImpl_cpu<2>
-//////////////////////////////////
+    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims();
+    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
+}
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation of add can be in-place
-    return 0;
+Aidge::NbElts_t  Aidge::AddImpl_cpu::getNbConsumedData(const Aidge::IOIndex_t inputIdx) const {
+    assert(inputIdx < mNbConsumedData.size());
+    return mNbConsumedData[inputIdx];
 }
 
-void Aidge::AddImpl_cpu<2>::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-
-    // Find the correct kernel type
-    auto kernelFunc = Registrar<AddImplForward_cpu<2>>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getInput(1)->dataType(),
-        mOp.getOutput(0)->dataType()});
-
-    // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getInput(1)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+Aidge::NbElts_t  Aidge::AddImpl_cpu::getNbProducedData(const Aidge::IOIndex_t outputIdx) const {
+    assert(outputIdx < mNbProducedData.size());
+    return mNbProducedData[outputIdx];
 }
 
+void  Aidge::AddImpl_cpu::updateConsummerProducer() {
+    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
 
-//////////////////////////////////
-// AddImpl_cpu<3>
-//////////////////////////////////
+    mNbProducedData[0]+= getRequiredMemory(0, {});
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation of add can be in-place
-    return 0;
 }
 
-void Aidge::AddImpl_cpu<3>::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-    assert(mOp.getInput(2) && "missing input #2");
-
-    // Find the correct kernel type
-    auto kernelFunc = Registrar<AddImplForward_cpu<3>>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getInput(1)->dataType(),
-        mOp.getInput(2)->dataType(),
-        mOp.getOutput(0)->dataType()});
-
-    // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getInput(1)->getImpl()->rawPtr(),
-        mOp.getInput(2)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
-}
+void  Aidge::AddImpl_cpu::forward() {
+    assert(mOp.getRawInput(0) && "missing input in Add operator");
+    DataType datatypeFirstInput = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType();
+    for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
+        assert(mOp.getRawInput(i) && "missing input in Add operator");
+        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dataType() == datatypeFirstInput);
+    }
+
+    auto kernelFunc = Registrar<AddImplForward_cpu>::create({
+        datatypeFirstInput,
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    std::vector<const void*> opInputs;
+    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
+        opInputs.push_back(getCPUPtr(mOp.getRawInput(i)));
+    }
+
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+               opInputs,
+               getCPUPtr(mOp.getRawOutput(0)));
+}
\ No newline at end of file
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index ae93934c23ce9bbc97d071be2f258e04ec8ae877..9e0a77e3285c1e3701142828c74898cb9da5b405 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -15,6 +15,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/AvgPooling.hpp"
 
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
@@ -26,15 +27,15 @@ Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*
 }
 
 void Aidge::AvgPoolingImpl2D_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
+    assert(mOp.getRawInput(0) && "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc =
-            Registrar<AvgPoolingImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
+            Registrar<AvgPoolingImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
     kernelFunc(dynamic_cast<const AvgPooling_Op<2>&>(mOp).getStaticAttributes(),
-               mOp.getInput(0)->dims<4>(),
-               mOp.getInput(0)->getImpl()->rawPtr(),
-               mOp.getOutput(0)->getImpl()->rawPtr());
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index c9d52b767b03008d19209e34fa9a6f2749a63450..c84f2cb6b09c707f68ed83cc7554624fc6489b84 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -14,6 +14,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/BatchNorm.hpp"
 
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
@@ -25,26 +26,27 @@ Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*i
 }
 
 void Aidge::BatchNormImpl2D_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-    assert(mOp.getInput(2) && "missing input #2");
-    assert(mOp.getInput(3) && "missing input #3");
-    assert(mOp.getInput(4) && "missing input #4");
+    assert(mOp.getRawInput(0) && "missing input #0");
+    assert(mOp.getRawInput(1) && "missing input #1");
+    assert(mOp.getRawInput(2) && "missing input #2");
+    assert(mOp.getRawInput(3) && "missing input #3");
+    assert(mOp.getRawInput(4) && "missing input #4");
 
-    assert(mOp.getOutput(0)->nbDims() == 4);
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->nbDims() == 4);
     // Find the correct kernel type
     auto kernelFunc =
-            Registrar<BatchNormImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getInput(1)->dataType(),
-                                                          mOp.getOutput(0)->dataType()});
+            Registrar<BatchNormImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+                                                           std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+                                                           std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
     kernelFunc(dynamic_cast<const BatchNorm_Op<2>&>(mOp).getStaticAttributes(),
-               mOp.getInput(0)->dims<4>(),
-               mOp.getInput(0)->getImpl()->rawPtr(),
-               mOp.getInput(1)->getImpl()->rawPtr(),
-               mOp.getInput(2)->getImpl()->rawPtr(),
-               mOp.getInput(3)->getImpl()->rawPtr(),
-               mOp.getInput(4)->getImpl()->rawPtr(),
-               mOp.getOutput(0)->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawInput(1)),
+               getCPUPtr(mOp.getRawInput(2)),
+               getCPUPtr(mOp.getRawInput(3)),
+               getCPUPtr(mOp.getRawInput(4)),
+               getCPUPtr(mOp.getRawOutput(0)),
                true);
 }
diff --git a/src/operator/ConcatImpl.cpp b/src/operator/ConcatImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ceefb9031f279be417a8ab0485567a56edea7824
--- /dev/null
+++ b/src/operator/ConcatImpl.cpp
@@ -0,0 +1,90 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <numeric> // std::accumulate
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+
+#include "aidge/backend/cpu/operator/ConcatImpl.hpp"
+#include "aidge/backend/cpu/operator/ConcatImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::ConcatImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+
+    // Requires the whole tensors
+    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->dims();
+    return std::accumulate(inputDims.begin(), inputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t  Aidge::ConcatImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation of Concat can be in-place: no protected memory is required
+    return 0;
+}
+
+Aidge::NbElts_t  Aidge::ConcatImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+    // Requires the whole tensors, regardless of available data on inputs
+    assert(outputIdx == 0 && "operator has only one output");
+    (void) outputIdx;
+
+    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims();
+    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t  Aidge::ConcatImpl_cpu::getNbConsumedData(const Aidge::IOIndex_t inputIdx) const {
+    assert(inputIdx < mNbConsumedData.size());
+    return mNbConsumedData[inputIdx];
+}
+
+Aidge::NbElts_t  Aidge::ConcatImpl_cpu::getNbProducedData(const Aidge::IOIndex_t outputIdx) const {
+    assert(outputIdx < mNbProducedData.size());
+    return mNbProducedData[outputIdx];
+}
+
+void  Aidge::ConcatImpl_cpu::updateConsummerProducer() {
+    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
+
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+
+}
+
+void  Aidge::ConcatImpl_cpu::forward() {
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input in Concat operator");
+    DataType datatypeFirstInput = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType();
+    for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
+        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i)) && "missing input in Concat operator");
+        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dataType() == datatypeFirstInput);
+    }
+
+    auto kernelFunc = Registrar<ConcatImplForward_cpu>::create({
+        datatypeFirstInput,
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    std::vector<const void*> opInputs;
+    std::vector<DimSize_t> opInputAxis;
+    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
+        opInputs.push_back(getCPUPtr(mOp.getRawInput(i)));
+        opInputAxis.push_back(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dims()[dynamic_cast<const Concat_Op&>(mOp).template getAttr<DimSize_t>("Axis")]);
+    }
+
+    kernelFunc(dynamic_cast<const Concat_Op&>(mOp).getStaticAttributes(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               opInputAxis,
+               opInputs,
+               getCPUPtr(mOp.getRawOutput(0)));
+}
+
+void  Aidge::ConcatImpl_cpu::backward() { printf("Not implemented yet.\n"); }
\ No newline at end of file
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 5ac109e2f282ce55c8a274597be08561c2baf5c8..1b4262e394f78ab0bda4a36440ac7b9cb15c164c 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -16,6 +16,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/ConvDepthWise.hpp"
 
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
@@ -27,19 +28,23 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t
 }
 
 void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-    assert(mOp.getInput(2) && "missing input #2");
+    assert(mOp.getRawInput(0) && "missing input #0");
+    assert(mOp.getRawInput(1) && "missing input #1");
+    assert(mOp.getRawInput(2) && "missing input #2");
 
-    assert((mOp.getInput(0)->nbDims() == 4) && "support for 4-dimensions tensors only");
+    assert((std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->nbDims() == 4) && "support for 4-dimensions tensors only");
 
     // Find the correct kernel type
     auto kernelFunc =
-            Registrar<ConvDepthWiseImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getInput(1)->dataType(),
-                                                          mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()});
+            Registrar<ConvDepthWiseImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+                                                               std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+                                                               std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(),
+                                                               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(dynamic_cast<const ConvDepthWise_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
-               mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
-               mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
+    kernelFunc(dynamic_cast<const ConvDepthWise_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawInput(1)),
+               getCPUPtr(mOp.getRawInput(2)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 347d427908502b9976c2943417775bcbf0d3b344..d476f84717c0ed6f7bd45d68bd24b4d7ada6cbbd 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -16,6 +16,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/Conv.hpp"
 
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
@@ -28,17 +29,19 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputI
 
 void Aidge::ConvImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-    assert(mOp.getInput(2) && "missing input #2");
+    assert(mOp.getRawInput(0) && "missing input #0");
+    assert(mOp.getRawInput(1) && "missing input #1");
+    assert(mOp.getRawInput(2) && "missing input #2");
 
     // Find the correct kernel type
     auto kernelFunc =
-            Registrar<ConvImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getInput(1)->dataType(),
-                                                          mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()});
+            Registrar<ConvImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+                                                      std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+                                                      std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(),
+                                                      std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
-               mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
-               mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
+    kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+               getCPUPtr(mOp.getRawInput(0)), getCPUPtr(mOp.getRawInput(1)),
+               getCPUPtr(mOp.getRawInput(2)), getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index f7cbc7d20b9126ab318a6989ebf627491cb247aa..f5cde077bd5a414d8b9add8b8b8715952a27ad01 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Div.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
 #include "aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp"
@@ -27,25 +28,16 @@ Aidge::NbElts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
 }
 
 void Aidge::DivImpl_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-
-    assert(((mOp.getInput(1)->size() == 1) || 
-            (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
-            (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
-           ) &&
-           "input #1 must either be a tensor of size 1, the number of channels of input # or the same size of input #0");
-
     // Find the correct kernel type
     auto kernelFunc = Registrar<DivImplForward_cpu>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getInput(1)->dataType(),
-        mOp.getOutput(0)->dataType()});
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getInput(1)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+    kernelFunc(std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)))->size(),
+        std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 77ce50281cf4db94a492fce88a6d73eabde1bae5..14f59f6f7baff57602ad71c8c08023038963b5f0 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -17,29 +17,30 @@
 
 #include "aidge/operator/FC.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
 #include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp"
 
 void Aidge::FCImpl_cpu::forward()
 {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-    assert(mOp.getInput(2) && "missing input #2");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)) && "missing input #1");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(2)) && "missing input #2");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<FCImplForward_cpu>::create(
-        {mOp.getInput(0)->dataType(),
-         mOp.getInput(1)->dataType(),
-         mOp.getInput(2)->dataType(),
-         mOp.getOutput(0)->dataType()});
+        {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+         std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(),
+         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    // if (mOp.getInput(0)->nbDims() == 4) {
+    // if (std::static_pointer_cast<Tensor>(mOp.getRawInput(0)->nbDims() == 4) {
     //     kernelFunc(
     //         mOp.getStaticAttributes(),
-    //         std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
-    //         mOp.getInput(0)->getImpl()->rawPtr(),
+    //         std::static_pointer_cast<Tensor>(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+    //         getCPUPtr(mOp.getRawInput(0),
     //         mOp.mInputs[1]->getImpl()->rawPtr(),
     //         mOp.mInputs[2]->getImpl()->rawPtr(),
     //         mOp.getOutput(0)->getImpl()->rawPtr());
@@ -47,10 +48,10 @@ void Aidge::FCImpl_cpu::forward()
     // else
     kernelFunc(
         dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
-        mOp.getInput(0)->dims()[0],
-        mOp.getInput(0)->sizeM1(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getInput(1)->getImpl()->rawPtr(),
-        mOp.getInput(2)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->sizeM1(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawInput(2)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index c81acf60f0171bd819bfd760565e59d361401e29..17912eb1dc75930eaf7595eb189af39df4d4fa2e 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp"
@@ -27,16 +28,16 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IO
 }
 
 void Aidge::LeakyReLUImpl_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<LeakyReLUImplForward_cpu>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getOutput(0)->dataType()});
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
     kernelFunc(dynamic_cast<const LeakyReLU_Op&>(mOp).getStaticAttributes(),
-        std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp
index f4812629c4bcf7b699d3eca66ff4e884df0c04d6..1abd75db070bbd3b197519318f5bf23c7b46ee5a 100644
--- a/src/operator/MatMulImpl.cpp
+++ b/src/operator/MatMulImpl.cpp
@@ -17,39 +17,40 @@
 
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp"
 
 void Aidge::MatMulImpl_cpu::forward()
 {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)) && "missing input #1");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<MatMulImplForward_cpu>::create(
-        {mOp.getInput(0)->dataType(),
-         mOp.getInput(1)->dataType(),
-         mOp.getOutput(0)->dataType()});
+        {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+         std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
     // if (mOp.getInput(0)->nbDims() == 4) {
     //     kernelFunc(
     //         mOp.getStaticAttributes(),
-    //         std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
-    //         mOp.getInput(0)->getImpl()->rawPtr(),
+    //         std::static_pointer_cast<Tensor>(mOp.getInput(0))->template dims<4>(),
+    //         mOp.getInput(0))->getImpl()->rawPtr(),
     //         mOp.mInputs[1]->getImpl()->rawPtr(),
     //         mOp.mInputs[2]->getImpl()->rawPtr(),
-    //         mOp.getOutput(0)->getImpl()->rawPtr());
+    //         getCPUPtr(mOp.getRawOutput(0));
     // }
     // else
     kernelFunc(
         dynamic_cast<const MatMul_Op&>(mOp).getStaticAttributes(),
-        mOp.getInput(0)->dims()[0],
-        mOp.getInput(0)->sizeM1(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getInput(1)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0],
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->sizeM1(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 
 
 }
diff --git a/src/operator/MaxPoolingImpl.cpp b/src/operator/MaxPoolingImpl.cpp
index c5127c1e4577b3da44716cdc34358a8906b9cbb0..e21dab07df4c20eb7253e680146042f205bc210b 100644
--- a/src/operator/MaxPoolingImpl.cpp
+++ b/src/operator/MaxPoolingImpl.cpp
@@ -15,6 +15,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/MaxPooling.hpp"
 
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
@@ -26,15 +27,15 @@ Aidge::NbElts_t Aidge::MaxPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*
 }
 
 void Aidge::MaxPoolingImpl2D_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc =
-            Registrar<MaxPoolingImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
+            Registrar<MaxPoolingImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
     kernelFunc(dynamic_cast<const MaxPooling_Op<2>&>(mOp).getStaticAttributes(),
-               mOp.getInput(0)->dims<4>(),
-               mOp.getInput(0)->getImpl()->rawPtr(),
-               mOp.getOutput(0)->getImpl()->rawPtr());
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
index b6eb245cf0b1afc8893dfbab13d3294b945b3e0e..fda49c3f20ed5cbe519d729a0bf759f0964a99fd 100644
--- a/src/operator/MulImpl.cpp
+++ b/src/operator/MulImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Mul.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
 #include "aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp"
@@ -27,25 +28,16 @@ Aidge::NbElts_t Aidge::MulImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
 }
 
 void Aidge::MulImpl_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-
-    assert(((mOp.getInput(1)->size() == 1) || 
-            (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
-            (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
-           ) &&
-           "input #1 must either be a tensor of size 1, the number of channels of input # or the same size of input #0");
-
     // Find the correct kernel type
     auto kernelFunc = Registrar<MulImplForward_cpu>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getInput(1)->dataType(),
-        mOp.getOutput(0)->dataType()});
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getInput(1)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/PadImpl.cpp b/src/operator/PadImpl.cpp
index 7c2af9e2161ddc4567b702690b8f268fe1af1b6c..219bf425fa34cdaaa378c49dd7c9837f9d94d97e 100644
--- a/src/operator/PadImpl.cpp
+++ b/src/operator/PadImpl.cpp
@@ -16,6 +16,7 @@
 #include <vector>
 
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/Conv.hpp"
 
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
@@ -27,19 +28,21 @@ Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(IOIndex_t inputIdx)
 
     // Padding cannot be in-place!
     // We must ensure that we do not override data that has not been consummed yet.
-    const auto inputSize = std::static_pointer_cast<Tensor>(mOp.getInput(0))->size();
-    const auto outputSize = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->size();
+    const auto inputSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size();
+    const auto outputSize = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size();
     return (outputSize - inputSize);
 }
 
 void Aidge::PadImpl2D_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc =
-            Registrar<PadImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
+            Registrar<PadImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(dynamic_cast<const Pad_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
-               mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
+    kernelFunc(dynamic_cast<const Pad_Op<2>&>(mOp).getStaticAttributes(),
+                        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+                        getCPUPtr(mOp.getRawInput(0)),
+                        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
index 52a4f46956e0d0f348583a23772c519a64ca857d..496646402e33869cfcbe7dae96e1fc81b875d0dd 100644
--- a/src/operator/PowImpl.cpp
+++ b/src/operator/PowImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Pow.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 #include "aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp"
@@ -27,25 +28,16 @@ Aidge::NbElts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
 }
 
 void Aidge::PowImpl_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-
-    assert(((mOp.getInput(1)->size() == 1) || 
-            (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
-            (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
-           ) &&
-           "input #1 must either be a tensor of size 1, the number of channels of input # or the same size of input #0");
-
     // Find the correct kernel type
     auto kernelFunc = Registrar<PowImplForward_cpu>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getInput(1)->dataType(),
-        mOp.getOutput(0)->dataType()});
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getInput(1)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp
index 404d95ef685fea3c5796e396a2c5e17c60ce53bc..4c5883a9b0155e7bb6e16cbac1b8de1a3a9e9e16 100644
--- a/src/operator/ProducerImpl.cpp
+++ b/src/operator/ProducerImpl.cpp
@@ -16,6 +16,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 
@@ -26,7 +27,7 @@ Aidge::DimSize_t Aidge::ProducerImpl_cpu::getNbProducedData(
     assert(outputIdx == 0 && "operator has only one output");
     (void) outputIdx;
 
-    return std::static_pointer_cast<Tensor>(mOp.getOutput(0))->size();
+    return std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size();
 }
 
 void Aidge::ProducerImpl_cpu::forward()
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 647898d3f0495a74fe7c1dd48dba446bd92cb7b5..8863be282ce0c7b7bfbfb938372cf304bc4cc4bd 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp"
@@ -27,15 +28,15 @@ Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex
 }
 
 void Aidge::ReLUImpl_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ReLUImplForward_cpu>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getOutput(0)->dataType()});
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getInput(0)->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/ScalingImpl.cpp b/src/operator/ScalingImpl.cpp
index 39c1326dd677a704795f625440e385d3f3a6465c..6b9aab31a9d61d2d7a5ff89961de3fa6a2b5ebd2 100644
--- a/src/operator/ScalingImpl.cpp
+++ b/src/operator/ScalingImpl.cpp
@@ -18,6 +18,7 @@
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
 #include "aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include <vector>
 
 Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
@@ -26,16 +27,16 @@ Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIn
 }
 
 void Aidge::ScalingImpl_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ScalingImplForward_cpu>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getOutput(0)->dataType()});
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
     kernelFunc(dynamic_cast<const Scaling_Op&>(mOp).getStaticAttributes(),
-        std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/SliceImpl.cpp b/src/operator/SliceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b60bbe60188f416f28ff2562875dce6e5ee15bd5
--- /dev/null
+++ b/src/operator/SliceImpl.cpp
@@ -0,0 +1,82 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <numeric>    // std::accumulate
+#include <functional> // std::multiplies
+
+#include "aidge/operator/Slice.hpp"
+
+#include "aidge/backend/cpu/operator/SliceImpl.hpp"
+#include "aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp"
+#include "aidge/utils/Types.h"
+#include <vector>
+#include <cassert>
+#include <tuple>
+
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");
+
+    // Requires the whole tensors
+    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims();
+
+    return std::accumulate(inputDims.begin(), inputDims.end(), static_cast<NbElts_t>(1),
+                            std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }
+
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                            const std::vector<Aidge::DimSize_t>& inputsSize) const {
+    (void)outputIdx;
+    (void)inputsSize;
+    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims();
+    return std::accumulate(outputDims.begin(), outputDims.end(), static_cast<NbElts_t>(1),
+                            std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
+    return mNbConsumedData[0];
+}
+
+Aidge::NbElts_t Aidge::SliceImpl_cpu::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
+    return mNbProducedData[0];
+}
+
+void Aidge::SliceImpl_cpu::updateConsummerProducer() {
+    // each input is consumed by the minimum amount for a forward pass
+    mNbConsumedData[0] += getNbRequiredData(0);
+
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
+
+void Aidge::SliceImpl_cpu::forward() {
+    // FIXME: uncomment the following code once memory handling will work
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<SliceImplForward_cpu>::create(
+            {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Slice_Op&>(mOp).getStaticAttributes(),
+            std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+            std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+            std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
+            );
+
+    // each input is consumed by the minimum amount for a forward pass
+    mNbConsumedData[0] += getNbRequiredData(0);
+
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
+
+void Aidge::SliceImpl_cpu::backward() { printf("Not implemented yet.\n"); }
\ No newline at end of file
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index 45b455a3f361587848e33864872f497493315a78..428d32fc7a4c1a2b639d4f78601c78ab41376b47 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp"
@@ -27,21 +28,21 @@ Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIn
 }
 
 void Aidge::SoftmaxImpl_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(0)->nbDims()>1);
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->nbDims()>1);
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<SoftmaxImplForward_cpu>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getOutput(0)->dataType()});
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
-    DimSize_t batchSize = mOp.getInput(0)->dims()[0];
-    DimSize_t channelSize = mOp.getInput(0)->dims()[1];
-    DimSize_t featureSize = mOp.getInput(0)->sizeM1()/channelSize;
+    DimSize_t batchSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[0];
+    DimSize_t channelSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims()[1];
+    DimSize_t featureSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->sizeM1()/channelSize;
     // Call kernel
     kernelFunc(batchSize,
                channelSize,
                featureSize,
-               mOp.getInput(0)->getImpl()->rawPtr(),
-               mOp.getOutput(0)->getImpl()->rawPtr());
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/SqrtImpl.cpp b/src/operator/SqrtImpl.cpp
index 75d1d2fb20b6748c931124847198b3168d9bdba7..2766e8ae21738775aadad86629a99d0a180e537e 100644
--- a/src/operator/SqrtImpl.cpp
+++ b/src/operator/SqrtImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp"
@@ -27,15 +28,15 @@ Aidge::NbElts_t Aidge::SqrtImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex
 }
 
 void Aidge::SqrtImpl_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<SqrtImplForward_cpu>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getOutput(0)->dataType()});
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getInput(0)->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
\ No newline at end of file
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 6d87821d89ff84aa1046a9ecf0fdd83dcc5dda53..038a1154182ea8f359cf1b485c3de251ffbbaed5 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -17,6 +17,7 @@
 
 #include "aidge/operator/Sub.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp"
@@ -27,25 +28,17 @@ Aidge::NbElts_t Aidge::SubImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
 }
 
 void Aidge::SubImpl_cpu::forward() {
-    assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.getInput(1) && "missing input #1");
-
-    assert(((mOp.getInput(1)->size() == 1) || 
-            (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
-            (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
-           ) &&
-           "input #1 must either be a tensor of size 1, the number of channels of input # or the same size of input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<SubImplForward_cpu>::create({
-        mOp.getInput(0)->dataType(),
-        mOp.getInput(1)->dataType(),
-        mOp.getOutput(0)->dataType()});
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
-        mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.getInput(1)->getImpl()->rawPtr(),
-        mOp.getOutput(0)->getImpl()->rawPtr());
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawInput(1)),
+        getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/unit_tests/operator/Test_AddImpl.cpp b/unit_tests/operator/Test_AddImpl.cpp
index 18d98d169ddcb74310c5153d7c2c95103c395bb7..740b1a5322b55e2347d93ed2e515358080a108a5 100644
--- a/unit_tests/operator/Test_AddImpl.cpp
+++ b/unit_tests/operator/Test_AddImpl.cpp
@@ -18,7 +18,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Add(forward)") {
+TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
     std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
         {                                       //
             {                                   //
@@ -40,14 +40,15 @@ TEST_CASE("[cpu/operator] Add(forward)") {
     });                                         //
 
     SECTION("One input") {
-        std::shared_ptr<Node> myAdd = Add<1>();
-        myAdd->getOperator()->setBackend("cpu");
-        myAdd->getOperator()->setDatatype(DataType::Int32);
-        myAdd->getOperator()->associateInput(0, input1);
-        myAdd->getOperator()->computeOutputDims();
+        std::shared_ptr<Node> myAdd = Add(1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
+        op->associateInput(0, input1);
+        op->setBackend("cpu");
+        op->setDataType(DataType::Int32);
+        op->computeOutputDims();
         myAdd->forward();
 
-        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *input1);
+        REQUIRE(*(op->getOutput(0)) == *input1);
     }
 
     SECTION("Two inputs") {
@@ -71,15 +72,16 @@ TEST_CASE("[cpu/operator] Add(forward)") {
             }
         });
 
-        std::shared_ptr<Node> myAdd = Add<2>();
-        myAdd->getOperator()->setDatatype(DataType::Int32);
-        myAdd->getOperator()->setBackend("cpu");
-        myAdd->getOperator()->associateInput(0, input1);
-        myAdd->getOperator()->associateInput(1, input1);
-        myAdd->getOperator()->computeOutputDims();
+        std::shared_ptr<Node> myAdd = Add(2);
+        auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
+        op->associateInput(0, input1);
+        op->associateInput(1, input1);
+        op->setBackend("cpu");
+        op->setDataType(DataType::Int32);
+        op->computeOutputDims();
         myAdd->forward();
 
-        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("Three inputs") {
@@ -103,15 +105,16 @@ TEST_CASE("[cpu/operator] Add(forward)") {
             }
         });
 
-        std::shared_ptr<Node> myAdd = Add<3>();
-        myAdd->getOperator()->setDatatype(DataType::Int32);
-        myAdd->getOperator()->setBackend("cpu");
-        myAdd->getOperator()->associateInput(0, input1);
-        myAdd->getOperator()->associateInput(1, input1);
-        myAdd->getOperator()->associateInput(2, input1);
-        myAdd->getOperator()->computeOutputDims();
+        std::shared_ptr<Node> myAdd = Add(3);
+        auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
+        op->associateInput(0, input1);
+        op->associateInput(1, input1);
+        op->associateInput(2, input1);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myAdd->forward();
 
-        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*op->getOutput(0) == *expectedOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_AvgPoolingImpl.cpp b/unit_tests/operator/Test_AvgPoolingImpl.cpp
index 10d4c09b32528e2cdcdbf2c56204e6911fca0187..c4abf0201771c3f39a429e0f935b8216a04514e1 100644
--- a/unit_tests/operator/Test_AvgPoolingImpl.cpp
+++ b/unit_tests/operator/Test_AvgPoolingImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] AvgPooling(forward)") {
+TEST_CASE("[cpu/operator] AvgPooling(forward)", "[AvgPooling][CPU]") {
     std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<float,2,2,5,5> { //NCHW
         {
             {
@@ -53,10 +53,9 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)") {
     });
     SECTION("Stride") {
         std::shared_ptr<Node> myAvgPool = AvgPooling({2,2}, "mycdw", {2,2});
-        myAvgPool->getOperator()->setDatatype(DataType::Float32);
-        myAvgPool->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
 
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> { 
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> {
             {
                 {
                     {{  3,   5},
@@ -72,11 +71,13 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)") {
                 }
             }
         });
-        myAvgPool->getOperator()->associateInput(0,myInput);
-        myAvgPool->getOperator()->computeOutputDims();
+        op->associateInput(0,myInput);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myAvgPool->forward();
-        myAvgPool->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myAvgPool->getOperator()->getOutput(0)) == *myOutput);
+        op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Stride >= feature dim") {
@@ -90,21 +91,22 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)") {
         }
         });
         std::shared_ptr<Node> myAvgPool = AvgPooling({3,3}, "mycdw", {3,3});
-        myAvgPool->getOperator()->setDatatype(DataType::Float32);
-        myAvgPool->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
 
-        Tensor myOutput = Array4D<float,1,1,1,1> { 
+        Tensor myOutput = Array4D<float,1,1,1,1> {
             {{{{(0.3745 + 0.9507 + 0.7320 + 0.5987 + 0.1560 + 0.1560 + 0.0581 + 0.8662 + 0.6011)/9.0}}}}
         };
-        myAvgPool->getOperator()->associateInput(0,myInput2);
-        myAvgPool->getOperator()->computeOutputDims();
+        op->associateInput(0,myInput2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myAvgPool->forward();
-        myAvgPool->getOperator()->getOutput(0)->print();
-        float* outPtr = static_cast<float*>(myAvgPool->getOperator()->output(0).getImpl()->rawPtr());
+        op->getOutput(0)->print();
+        float* outPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedOutPtr = static_cast<float*>(myOutput.getImpl()->rawPtr());
         for (std::size_t i = 0; i < 1; ++i) {
             REQUIRE(std::abs(outPtr[i] - expectedOutPtr[i]) < 0.00001);
         }
     }
-    // std::cout << static_cast<Tensor>((*myAvgPool->getOperator())["weight"])[0][0][0][0] << std::endl;
+    // std::cout << static_cast<Tensor>((*op)["weight"])[0][0][0][0] << std::endl;
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_BatchNormImpl.cpp b/unit_tests/operator/Test_BatchNormImpl.cpp
index e6107a028e0c3d62f69821ff2650b45f34da103f..e6b7c3c655b865973028fc8c43323a7db3f4a5ef 100644
--- a/unit_tests/operator/Test_BatchNormImpl.cpp
+++ b/unit_tests/operator/Test_BatchNormImpl.cpp
@@ -19,10 +19,9 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] BatchNorm(forward)") {
+TEST_CASE("[cpu/operator] BatchNorm(forward)", "[BatchNorm][CPU]") {
     std::shared_ptr<Node> myBatchNorm = BatchNorm<2>(0.00001F, 0.1F, "mybatchnorm");
-    myBatchNorm->getOperator()->setDatatype(DataType::Float32);
-    myBatchNorm->getOperator()->setBackend("cpu");
+    auto op = std::static_pointer_cast<OperatorTensor>(myBatchNorm -> getOperator());
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array1D<float,3> {{0.9044, 0.3028, 0.0218}});
     std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<float,3> {{0.1332, 0.7503, 0.0878}});
     std::shared_ptr<Tensor> myMean = std::make_shared<Tensor>(Array1D<float,3> {{0.9931, 0.8421, 0.9936}});
@@ -53,7 +52,7 @@ TEST_CASE("[cpu/operator] BatchNorm(forward)") {
                 }
             }
     });
-    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> { 
+    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
         {
             {
                 {{-0.08978321, -0.12890550, -0.21362889},
@@ -79,19 +78,21 @@ TEST_CASE("[cpu/operator] BatchNorm(forward)") {
             }
         }
     });
-    myBatchNorm->getOperator()->associateInput(0,myInput);
-    myBatchNorm->getOperator()->associateInput(1,myWeights);
-    myBatchNorm->getOperator()->associateInput(2,myBias);
-    myBatchNorm->getOperator()->associateInput(3,myMean);
-    myBatchNorm->getOperator()->associateInput(4,myVar);
-    myBatchNorm->getOperator()->computeOutputDims();
+    op->associateInput(0,myInput);
+    op->associateInput(1,myWeights);
+    op->associateInput(2,myBias);
+    op->associateInput(3,myMean);
+    op->associateInput(4,myVar);
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+    op->computeOutputDims();
     myBatchNorm->forward();
 
-    float* resPtr = static_cast<float*>(myBatchNorm->getOperator()->getOutput(0)->getImpl()->rawPtr());
+    float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
     float* expectedPtr = static_cast<float*>(myOutput->getImpl()->rawPtr());
     for (std::size_t i = 0; i< 54; ++i) {
         REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
     }
 
-    // std::cout << static_cast<Tensor>((*myBatchNorm->getOperator())["weight"])[0][0][0][0] << std::endl;
+    // std::cout << static_cast<Tensor>((*op)["weight"])[0][0][0][0] << std::endl;
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ConcatImpl.cpp b/unit_tests/operator/Test_ConcatImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7f616fcb30cd51efb790fe725d423600901f2976
--- /dev/null
+++ b/unit_tests/operator/Test_ConcatImpl.cpp
@@ -0,0 +1,147 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Add.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
+    SECTION("Concat 1D inputs") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
+        std::shared_ptr<Tensor> input3 = std::make_shared<Tensor>(Array1D<int,4>{{ 7, 8, 9, 10 }});
+        std::shared_ptr<Tensor> input4 = std::make_shared<Tensor>(Array1D<int,5>{{ 11, 12, 13, 14, 15 }});
+        std::shared_ptr<Tensor> input5 = std::make_shared<Tensor>(Array1D<int,6>{{ 16, 17, 18, 19, 20, 21 }});
+
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,20>{
+            { 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21 }});
+
+        auto myConcat = Concat(5, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        myConcat->getOperator()->associateInput(2, input3);
+        myConcat->getOperator()->associateInput(3, input4);
+        myConcat->getOperator()->associateInput(4, input5);
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->computeOutputDims();
+        myConcat->forward();
+
+        std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+    }
+    SECTION("Concat 4D inputs on 1st axis") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+            {                                       //
+                {                                   //
+                    {{20, 47},{21, 48},{22, 49}},   //
+                    {{23, 50},{24, 51},{25, 52}},   //
+                    {{26, 53},{27, 54},{28, 55}}    //
+                },                                  //
+            }                                       //
+        });                                         //
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array4D<int,2,3,3,2> {
+            {
+                {                                   //
+                    {{29, 56},{30, 57},{31, 58}},   //
+                    {{32, 59},{33, 60},{34, 61}},   //
+                    {{35, 62},{36, 63},{37, 64}}    //
+                },                                  //
+                {                                   //
+                    {{38, 65},{39, 66},{40, 67}},   //
+                    {{41, 68},{42, 69},{43, 70}},   //
+                    {{44, 71},{45, 72},{46, 73}}    //
+                }                                   //
+            }                                       //
+        });                                         //
+
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
+            {                                       //
+                {                                   //
+                    {{20, 47},{21, 48},{22, 49}},   //
+                    {{23, 50},{24, 51},{25, 52}},   //
+                    {{26, 53},{27, 54},{28, 55}}    //
+                },                                  //
+                {                                   //
+                    {{29, 56},{30, 57},{31, 58}},   //
+                    {{32, 59},{33, 60},{34, 61}},   //
+                    {{35, 62},{36, 63},{37, 64}}    //
+                },                                  //
+                {                                   //
+                    {{38, 65},{39, 66},{40, 67}},   //
+                    {{41, 68},{42, 69},{43, 70}},   //
+                    {{44, 71},{45, 72},{46, 73}}    //
+                }                                   //
+            }                                       //
+        });                                         //
+
+        auto myConcat = Concat(2, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->computeOutputDims();
+        myConcat->forward();
+
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0)->print();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+    }
+
+    SECTION("Concat 4D inputs on 3rd axis") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+            {                                       //
+                {                                   //
+                    {{20, 47},{21, 48},{22, 49}},   //
+                    {{23, 50},{24, 51},{25, 52}},   //
+                    {{26, 53},{27, 54},{28, 55}}    //
+                },                                  //
+            }                                       //
+        });                                         //
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array4D<int,1,3,6,2> {
+            {
+                {                                   //
+                    {{29, 56},{30, 57},{31, 58},{38, 65},{39, 66},{40, 67}},   //
+                    {{32, 59},{33, 60},{34, 61},{41, 68},{42, 69},{43, 70}},   //
+                    {{35, 62},{36, 63},{37, 64},{44, 71},{45, 72},{46, 73}}    //
+                },
+            }
+        });
+
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,3,9,2> {
+            {                                                                                             //
+                {                                                                                         //
+                    {{20, 47},{21, 48},{22, 49},{29, 56},{30, 57},{31, 58},{38, 65},{39, 66},{40, 67}},   //
+                    {{23, 50},{24, 51},{25, 52},{32, 59},{33, 60},{34, 61},{41, 68},{42, 69},{43, 70}},   //
+                    {{26, 53},{27, 54},{28, 55},{35, 62},{36, 63},{37, 64},{44, 71},{45, 72},{46, 73}}    //
+                },                                                                                        //
+            }                                                                                             //
+        });                                                                                               //
+
+        auto myConcat = Concat(2, 2);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->computeOutputDims();
+        myConcat->forward();
+
+        std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ConvDepthWiseImpl.cpp b/unit_tests/operator/Test_ConvDepthWiseImpl.cpp
index 0d0ed4b928d64cafc96907fedf3ee0d642a255d0..112703b64162004ab708f143d6e12b0c8bb9c6b6 100644
--- a/unit_tests/operator/Test_ConvDepthWiseImpl.cpp
+++ b/unit_tests/operator/Test_ConvDepthWiseImpl.cpp
@@ -19,34 +19,33 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] ConvDepthWise(forward)") {
-    std::shared_ptr<Node> myCDW = ConvDepthWise({3,3}, "mycdw");
-    myCDW->getOperator()->setDatatype(DataType::Int32);
-    myCDW->getOperator()->setBackend("cpu");
-    std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array3D<int,4,3,3> {
+TEST_CASE("[cpu/operator] ConvDepthWise(forward)", "[ConvDepthWise][CPU]") {
+    std::shared_ptr<Node> myCDW = ConvDepthWise(4, {3,3}, "mycdw");
+    auto op = std::static_pointer_cast<OperatorTensor>(myCDW -> getOperator());
+    std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,1,3,3> {
         {
-            {
+            {{
                 {  0,  1,  2},
                 {  3,  4,  5},
                 {  6,  7,  8}
 
-            },
-            {
+            }},
+            {{
                 { 27, 28, 29},
                 { 30, 31, 32},
                 { 33, 34, 35}
 
-            },
-            {
+            }},
+            {{
                 { 54, 55, 56},
                 { 57, 58, 59},
                 { 60, 61, 62}
-            },
-            {
+            }},
+            {{
                 { 81, 82, 83},
                 { 84, 85, 86},
                 { 87, 88, 89}
-            }
+            }}
         }
     });
     std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
@@ -104,7 +103,7 @@ TEST_CASE("[cpu/operator] ConvDepthWise(forward)") {
             }
         }
     });
-    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> { 
+    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
         {
             {
                 {{   319,    355,    391},
@@ -142,13 +141,15 @@ TEST_CASE("[cpu/operator] ConvDepthWise(forward)") {
             }
         }
     });
-    myCDW->getOperator()->associateInput(0,myInput);
-    myCDW->getOperator()->associateInput(1,myWeights);
-    myCDW->getOperator()->associateInput(2,myBias);
-    myCDW->getOperator()->computeOutputDims();
-    myCDW->forward();
-    myCDW->getOperator()->getOutput(0)->print();
-    REQUIRE(*(myCDW->getOperator()->getOutput(0)) == *myOutput);
-
-    // std::cout << static_cast<Tensor>((*myCDW->getOperator())["weight"])[0][0][0][0] << std::endl;
+    op -> associateInput(0, myInput);
+    op -> associateInput(1, myWeights);
+    op -> associateInput(2, myBias);
+    op->setDataType(DataType::Int32);
+    op->setBackend("cpu");
+    op -> computeOutputDims();
+    myCDW -> forward();
+    op -> getOutput(0) -> print();
+    REQUIRE(*(op -> getOutput(0)) == *myOutput);
+
+    // std::cout << static_cast<Tensor>((*op)["weight"])[0][0][0][0] << std::endl;
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ConvImpl.cpp b/unit_tests/operator/Test_ConvImpl.cpp
index 891f0e94b02d07d41751728e83fa9b42e4b89be8..0f46e8f6405366a32f45ce61d61fc94afabdd4a8 100644
--- a/unit_tests/operator/Test_ConvImpl.cpp
+++ b/unit_tests/operator/Test_ConvImpl.cpp
@@ -20,11 +20,10 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Conv(forward)") {
+TEST_CASE("[cpu/operator] Conv(forward)", "[Conv][CPU]") {
     SECTION("Classic Conv") {
         std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv");
-        myConv->getOperator()->setDatatype(DataType::Int32);
-        myConv->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
         std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
             {
                 {
@@ -116,7 +115,7 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
                 }
             }
         });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> { 
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
             {
                 {
                     {{ 15226,  15577,  15928},
@@ -148,19 +147,20 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
                 }
             }
         });
-        myConv->getOperator()->associateInput(0,myInput);
-        myConv->getOperator()->associateInput(1,myWeights);
-        myConv->getOperator()->associateInput(2,myBias);
-        myConv->getOperator()->computeOutputDims();
+        op->associateInput(0,myInput);
+        op->associateInput(1,myWeights);
+        op->associateInput(2,myBias);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myConv->forward();
-        // myConv->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+        // op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
     SECTION("Point-wise") {
         std::shared_ptr<Node> myConv = Conv(3,4,{1,1}, "myconv", {1,1});
-        myConv->getOperator()->setDatatype(DataType::Float32);
-        myConv->getOperator()->setBackend("cpu");
-        myConv->getOperator()->input(0) = Array4D<float,2,3,3,3> {
+        auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
+        op->setInput(0, std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
             {
                 {
                     {{-1.38467371F, -0.87123615F, -0.22336592F},
@@ -185,8 +185,8 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
                      { 0.09811721F,  1.74225271F, -1.35267365F}}
                 }
             }
-        };
-        myConv->getOperator()->input(1) = Array4D<float,4,3,1,1> {
+        }));
+        op->setInput(1, std::make_shared<Tensor>(Array4D<float,4,3,1,1> {
             {
                 {
                     {{ 0.33669037F}},
@@ -208,8 +208,8 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
                     {{ 0.80935723F}}
                 }
             }
-        };
-        myConv->getOperator()->input(2) = Array1D<float,4> {{ 1.11029029F, -1.68979895F, -0.98895991F,  0.95797181F}};
+        }));
+        op->setInput(2, std::make_shared<Tensor>(Array1D<float,4> {{ 1.11029029F, -1.68979895F, -0.98895991F,  0.95797181F}}));
         Tensor expectedOutput = Array4D<float,2,4,3,3> {
             {
                 {
@@ -242,11 +242,12 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
                 }
             }
         };
-
-        myConv->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myConv->forward();
-        
-        float* resPtr = static_cast<float*>(myConv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput.getImpl()->rawPtr());
         for (std::size_t i = 0; i< expectedOutput.size(); ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp
index c33319c88b63ee834bbcb388bbbe0775699edbd7..16f69db964a092f6be87e5d983ba00694e8006f8 100644
--- a/unit_tests/operator/Test_DivImpl.cpp
+++ b/unit_tests/operator/Test_DivImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Div(forward)") {
+TEST_CASE("[cpu/operator] Div(forward)", "[Div][CPU]") {
     SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -37,14 +37,15 @@ TEST_CASE("[cpu/operator] Div(forward)") {
         });
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDatatype(DataType::Float32);
-        myDiv->getOperator()->setBackend("cpu");
-        myDiv->getOperator()->associateInput(0, input_1);
-        myDiv->getOperator()->associateInput(1, input_2);
-        myDiv->getOperator()->computeOutputDims();
-        myDiv->forward();
-
-        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+        op -> associateInput(0, input_1);
+        op -> associateInput(1, input_2);
+        op -> setDataType(DataType::Float32);
+        op -> setBackend("cpu");
+        op -> computeOutputDims();
+        myDiv -> forward();
+
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -73,14 +74,15 @@ TEST_CASE("[cpu/operator] Div(forward)") {
         });
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDatatype(DataType::Float32);
-        myDiv->getOperator()->setBackend("cpu");
-        myDiv->getOperator()->associateInput(0, input_1);
-        myDiv->getOperator()->associateInput(1, input_2);
-        myDiv->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+        op -> associateInput(0, input_1);
+        op -> associateInput(1, input_2);
+        op -> setDataType(DataType::Float32);
+        op -> setBackend("cpu");
+        op -> computeOutputDims();
         myDiv->forward();
 
-        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -112,14 +114,15 @@ TEST_CASE("[cpu/operator] Div(forward)") {
         });
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDatatype(DataType::Float32);
-        myDiv->getOperator()->setBackend("cpu");
-        myDiv->getOperator()->associateInput(0, input_1);
-        myDiv->getOperator()->associateInput(1, input_2);
-        myDiv->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+        op -> associateInput(0, input_1);
+        op -> associateInput(1, input_2);
+        op -> setDataType(DataType::Float32);
+        op -> setBackend("cpu");
+        op -> computeOutputDims();
         myDiv->forward();
 
-        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 12; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -191,14 +194,15 @@ TEST_CASE("[cpu/operator] Div(forward)") {
         });
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDatatype(DataType::Float32);
-        myDiv->getOperator()->setBackend("cpu");
-        myDiv->getOperator()->associateInput(0, input_1);
-        myDiv->getOperator()->associateInput(1, input_2);
-        myDiv->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+        op -> associateInput(0, input_1);
+        op -> associateInput(1, input_2);
+        op -> setDataType(DataType::Float32);
+        op -> setBackend("cpu");
+        op -> computeOutputDims();
         myDiv->forward();
 
-        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 54; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_FCImpl.cpp b/unit_tests/operator/Test_FCImpl.cpp
index e3494e20205f1a295eb537100b59fb7bbc26116a..4309ce1a54f14b1da0c8b173cb46992109ee034b 100644
--- a/unit_tests/operator/Test_FCImpl.cpp
+++ b/unit_tests/operator/Test_FCImpl.cpp
@@ -19,7 +19,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/oeprator] FC(forward)") {
+TEST_CASE("[cpu/operator] FC(forward)", "[FC][CPU]") {
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{
             {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
               5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
@@ -45,11 +45,10 @@ TEST_CASE("[cpu/oeprator] FC(forward)") {
     std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{
             {{23601, 23602, 23603, 23604, 23605}, {68601, 68602, 68603, 68604, 68605}}});
 
-    std::shared_ptr<Node> myFC = FC(5, false, "myfc");
-    myFC->getOperator()->setDatatype(DataType::Int32);
-    myFC->getOperator()->setBackend("cpu");
-    myFC->getOperator()->associateInput(1, myWeights);
-    myFC->getOperator()->associateInput(2, myBias);
+    std::shared_ptr<Node> myFC = FC(75, 5, false, "myfc");
+    auto op = std::static_pointer_cast<OperatorTensor>(myFC -> getOperator());
+    op -> associateInput(1, myWeights);
+    op -> associateInput(2, myBias);
 
     SECTION("2D input") {
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{
@@ -62,10 +61,12 @@ TEST_CASE("[cpu/oeprator] FC(forward)") {
                   105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
                   120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
                   135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
-        myFC->getOperator()->associateInput(0, myInput);
-        myFC->getOperator()->computeOutputDims();
+        op->associateInput(0, myInput);
+        op -> setDataType(DataType::Int32);
+        op -> setBackend("cpu");
+        op->computeOutputDims();
         myFC->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
     SECTION("4D input") {
         std::shared_ptr<Tensor> myInput =
@@ -99,10 +100,12 @@ TEST_CASE("[cpu/oeprator] FC(forward)") {
                                                                      {135, 136, 137, 138, 139},
                                                                      {140, 141, 142, 143, 144},
                                                                      {145, 146, 147, 148, 149}}}}});
-        myFC->getOperator()->associateInput(0, myInput);
-        myFC->getOperator()->computeOutputDims();
+        op->associateInput(0, myInput);
+        op -> setDataType(DataType::Int32);
+        op -> setBackend("cpu");
+        op->computeOutputDims();
         myFC->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     // std::cout << static_cast<Tensor>((*myFC->getOperator())["weight"])[0][0][0][0] << std::endl;
diff --git a/unit_tests/operator/Test_LeakyReLUImpl.cpp b/unit_tests/operator/Test_LeakyReLUImpl.cpp
index d5bd91ff75404a7b928c8919c64e06315b78206f..cad2a6f97a31e4e2200a8c8ceb1d9dde7b118362 100644
--- a/unit_tests/operator/Test_LeakyReLUImpl.cpp
+++ b/unit_tests/operator/Test_LeakyReLUImpl.cpp
@@ -18,7 +18,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
+TEST_CASE("[cpu/operator] LeakyReLU(forward)", "[LeakyReLU][CPU]") {
     SECTION("1D Tensor") {
         std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
             {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
@@ -28,12 +28,13 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDatatype(DataType::Int32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myLeakyReLU->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("2D Tensor") {
@@ -51,12 +52,13 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDatatype(DataType::Int32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("3D Tensor") {
@@ -86,12 +88,13 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDatatype(DataType::Int32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("4D Tensor") {
@@ -145,12 +148,13 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDatatype(DataType::Int32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("Test construction attribute: negative_slop") {
@@ -162,11 +166,12 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU(0.5f);
-        myLeakyReLU->getOperator()->setDatatype(DataType::Float32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp
index 0da01b3287043e07e5b967df8882960cfb814f8f..1edb915fb78e3e056f455ddecb8e704eee068cd9 100644
--- a/unit_tests/operator/Test_MatMulImpl.cpp
+++ b/unit_tests/operator/Test_MatMulImpl.cpp
@@ -19,7 +19,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
+TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
     // Test MatMul forward with batch size = 2 and feature size = 75
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{
             {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
@@ -45,10 +45,9 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
     std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{
             {{23600, 23600, 23600, 23600, 23600}, {68600, 68600, 68600, 68600, 68600}}});
 
-    std::shared_ptr<Node> myMatMul = MatMul(5, "mymatmul");
-    myMatMul->getOperator()->setDatatype(DataType::Int32);
-    myMatMul->getOperator()->setBackend("cpu");
-    myMatMul->getOperator()->associateInput(1, myWeights);
+    std::shared_ptr<Node> myMatMul = MatMul(75, 5, "mymatmul");
+    auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator());
+    op->associateInput(1, myWeights);
 
     SECTION("2D input") {
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{
@@ -61,10 +60,12 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
                   105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
                   120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
                   135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
-        myMatMul->getOperator()->associateInput(0, myInput);
-        myMatMul->getOperator()->computeOutputDims();
+        op->associateInput(0, myInput);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myMatMul->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
     SECTION("4D input") {
         std::shared_ptr<Tensor> myInput =
@@ -98,10 +99,12 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
                                                                      {135, 136, 137, 138, 139},
                                                                      {140, 141, 142, 143, 144},
                                                                      {145, 146, 147, 148, 149}}}}});
-        myMatMul->getOperator()->associateInput(0, myInput);
-        myMatMul->getOperator()->computeOutputDims();
+        op->associateInput(0, myInput);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myMatMul->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     // std::cout << static_cast<Tensor>((*myMatMul->getOperator())["weight"])[0][0][0][0] << std::endl;
diff --git a/unit_tests/operator/Test_MaxPoolingImpl.cpp b/unit_tests/operator/Test_MaxPoolingImpl.cpp
index 83fa7eaa670399c8d6c085a14db08fa35df9de8c..9f528f2d044cf43133f3729a7f0e4f1bd95b8889 100644
--- a/unit_tests/operator/Test_MaxPoolingImpl.cpp
+++ b/unit_tests/operator/Test_MaxPoolingImpl.cpp
@@ -21,7 +21,7 @@
 using namespace Aidge;
 
 
-TEST_CASE("[cpu/operator] MaxPooling(forward)") {
+TEST_CASE("[cpu/operator] MaxPooling(forward)", "[MaxPooling][CPU]") {
     std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<float,2,2,5,5> { //NCHW
         {
             {
@@ -54,10 +54,9 @@ TEST_CASE("[cpu/operator] MaxPooling(forward)") {
     });
     SECTION("Stride") {
         std::shared_ptr<Node> myMaxPool = MaxPooling({2,2}, "mycdw", {2,2});
-        myMaxPool->getOperator()->setDatatype(DataType::Float32);
-        myMaxPool->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myMaxPool -> getOperator());
 
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> { 
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> {
             {
                 {
                     {{  0.7995,  0.6142},
@@ -74,9 +73,11 @@ TEST_CASE("[cpu/operator] MaxPooling(forward)") {
             }
         });
         myMaxPool->getOperator()->associateInput(0,myInput);
-        myMaxPool->getOperator()->computeOutputDims();
+        myMaxPool->getOperator()->setDataType(DataType::Float32);
+        myMaxPool->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myMaxPool->forward();
-        myMaxPool->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myMaxPool->getOperator()->getOutput(0)) == *myOutput);
+        op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp
index cea62f998cfc538d1d5800639e461eb4d15cb270..1707bc81e0bb549bfe90078242f8a4eae77db3c3 100644
--- a/unit_tests/operator/Test_MulImpl.cpp
+++ b/unit_tests/operator/Test_MulImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Mul(forward)") {
+TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
     SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -37,14 +37,15 @@ TEST_CASE("[cpu/operator] Mul(forward)") {
         });
 
         std::shared_ptr<Node> myMul = Mul();
-        myMul->getOperator()->setDatatype(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
         myMul->getOperator()->associateInput(0, input_1);
         myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->computeOutputDims();
+        myMul->getOperator()->setDataType(DataType::Float32);
+        myMul->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myMul->forward();
 
-        float* resPtr = static_cast<float*>(myMul->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -73,14 +74,15 @@ TEST_CASE("[cpu/operator] Mul(forward)") {
         });
 
         std::shared_ptr<Node> myMul = Mul();
-        myMul->getOperator()->setDatatype(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
         myMul->getOperator()->associateInput(0, input_1);
         myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->computeOutputDims();
+        myMul->getOperator()->setDataType(DataType::Float32);
+        myMul->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myMul->forward();
 
-        float* resPtr = static_cast<float*>(myMul->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -112,14 +114,15 @@ TEST_CASE("[cpu/operator] Mul(forward)") {
         });
 
         std::shared_ptr<Node> myMul = Mul();
-        myMul->getOperator()->setDatatype(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
         myMul->getOperator()->associateInput(0, input_1);
         myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->computeOutputDims();
+        myMul->getOperator()->setDataType(DataType::Float32);
+        myMul->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myMul->forward();
 
-        float* resPtr = static_cast<float*>(myMul->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 12; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_PadImpl.cpp b/unit_tests/operator/Test_PadImpl.cpp
index b603e165392f1a861dc1b40d50b70a53c9256870..edcdaa9623e4a788f515ee99491accffcef576af 100644
--- a/unit_tests/operator/Test_PadImpl.cpp
+++ b/unit_tests/operator/Test_PadImpl.cpp
@@ -20,13 +20,12 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Pad(forward)") {
+TEST_CASE("[cpu/operator] Pad(forward)", "[Pad][CPU]") {
     SECTION("Symmetric Pad") {
         const int pv = 0; // pad value
 
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
-        myPad->getOperator()->setDatatype(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -125,18 +124,19 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        myPad->getOperator()->setDataType(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Asymmetric Pad") {
         const int pv = 0; // pad value
 
         std::shared_ptr<Node> myPad = Pad<2>({1, 0, 0, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
-        myPad->getOperator()->setDatatype(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -229,16 +229,17 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        myPad->getOperator()->setDataType(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Pad Edge") {
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Edge);
-        myPad->getOperator()->setDatatype(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -337,16 +338,17 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        myPad->getOperator()->setDataType(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Pad Reflect") {
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Reflect);
-        myPad->getOperator()->setDatatype(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -453,16 +455,17 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        myPad->getOperator()->setDataType(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
-         myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Pad Wrap") {
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Wrap);
-        myPad->getOperator()->setDatatype(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -561,9 +564,11 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        myPad->getOperator()->setDataType(DataType::Int32);
+        myPad->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_PaddedConv.cpp b/unit_tests/operator/Test_PaddedConv.cpp
index e41be85ab00faae1af7239c43b74a34f558a663c..3baf0a7aa0f366a8f0dd4e3e9df6700a5cdb0cea 100644
--- a/unit_tests/operator/Test_PaddedConv.cpp
+++ b/unit_tests/operator/Test_PaddedConv.cpp
@@ -21,11 +21,10 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] PaddedConv(forward)") {
+TEST_CASE("[cpu/operator] PaddedConv(forward)", "[PaddedConv][CPU]") {
     SECTION("Classic Conv") {
         std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv");
-        myConv->getOperator()->setDatatype(DataType::Int32);
-        myConv->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
         std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
             {
                 {
@@ -117,7 +116,7 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)") {
                 }
             }
         });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> { 
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
             {
                 {
                     {{ 15226,  15577,  15928},
@@ -153,15 +152,16 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)") {
         myConv->getOperator()->associateInput(0,myInput);
         myConv->getOperator()->associateInput(1,myWeights);
         myConv->getOperator()->associateInput(2,myBias);
-        myConv->getOperator()->computeOutputDims();
+        myConv->getOperator()->setDataType(DataType::Int32);
+        myConv->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myConv->forward();
 
-        REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
     SECTION("test Padding") {
         std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv", {1,1}, {1,1,1,1});
-        myConv->getOperator()->setDatatype(DataType::Int32);
-        myConv->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
         std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
             {
                 {
@@ -253,7 +253,7 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)") {
                 }
             }
         });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,5,5> { 
+        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,5,5> {
             {
                 {
                     {{  6895,  10225,  10486,  10747,   7063},
@@ -311,9 +311,11 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)") {
         myConv->getOperator()->associateInput(0,myInput);
         myConv->getOperator()->associateInput(1,myWeights);
         myConv->getOperator()->associateInput(2,myBias);
-        myConv->getOperator()->computeOutputDims();
+        myConv->getOperator()->setDataType(DataType::Int32);
+        myConv->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         myConv->forward();
 
-        REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 }
diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp
index 7293198f411510904ee73aced47b69dfc37374af..0c95e785958aca72b5ae1f5727134552310e5bef 100644
--- a/unit_tests/operator/Test_PowImpl.cpp
+++ b/unit_tests/operator/Test_PowImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Pow(forward)") {
+TEST_CASE("[cpu/operator] Pow(forward)", "[Pow][CPU]") {
     SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -37,14 +37,15 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
         });
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDatatype(DataType::Float32);
-        myPow->getOperator()->setBackend("cpu");
-        myPow->getOperator()->associateInput(0, input_1);
-        myPow->getOperator()->associateInput(1, input_2);
-        myPow->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+        op->associateInput(0, input_1);
+        op->associateInput(1, input_2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPow->forward();
 
-        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -76,14 +77,15 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
         });
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDatatype(DataType::Float32);
-        myPow->getOperator()->setBackend("cpu");
-        myPow->getOperator()->associateInput(0, input_1);
-        myPow->getOperator()->associateInput(1, input_2);
-        myPow->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+        op->associateInput(0, input_1);
+        op->associateInput(1, input_2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPow->forward();
 
-        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 12; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -112,14 +114,15 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
         });
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDatatype(DataType::Float32);
-        myPow->getOperator()->setBackend("cpu");
-        myPow->getOperator()->associateInput(0, input_1);
-        myPow->getOperator()->associateInput(1, input_2);
-        myPow->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+        op->associateInput(0, input_1);
+        op->associateInput(1, input_2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPow->forward();
 
-        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -187,14 +190,15 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
         });
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDatatype(DataType::Float32);
-        myPow->getOperator()->setBackend("cpu");
-        myPow->getOperator()->associateInput(0, input_1);
-        myPow->getOperator()->associateInput(1, input_2);
-        myPow->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+        op->associateInput(0, input_1);
+        op->associateInput(1, input_2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPow->forward();
 
-        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 54; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_ReLUImpl.cpp b/unit_tests/operator/Test_ReLUImpl.cpp
index 9752a4914b5cb3cd06f2654cf64e0c193c5dd65b..c4166ac4dba75d6719fc2f38f980065126948e1f 100644
--- a/unit_tests/operator/Test_ReLUImpl.cpp
+++ b/unit_tests/operator/Test_ReLUImpl.cpp
@@ -21,7 +21,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] ReLU(forward)") {
+TEST_CASE("[cpu/operator] ReLU(forward)", "[ReLU][CPU]") {
     SECTION("1D Tensor") {
         std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
             {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
@@ -31,12 +31,13 @@ TEST_CASE("[cpu/operator] ReLU(forward)") {
         });
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDatatype(DataType::Int32);
-        myReLU->getOperator()->setBackend("cpu");
-        myReLU->getOperator()->associateInput(0,input0);
-        myReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myReLU->forward();
-        REQUIRE(*(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("2D Tensor") {
@@ -54,12 +55,13 @@ TEST_CASE("[cpu/operator] ReLU(forward)") {
         });
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDatatype(DataType::Int32);
-        myReLU->getOperator()->setBackend("cpu");
-        myReLU->getOperator()->associateInput(0,input0);
-        myReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myReLU->forward();
-        REQUIRE(*myReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*op->getOutput(0) == *expectedOutput);
     }
 
     SECTION("3D Tensor") {
@@ -89,12 +91,13 @@ TEST_CASE("[cpu/operator] ReLU(forward)") {
         });
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDatatype(DataType::Int32);
-        myReLU->getOperator()->setBackend("cpu");
-        myReLU->getOperator()->associateInput(0,input0);
-        myReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myReLU->forward();
-        REQUIRE(*(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("4D Tensor") {
@@ -148,11 +151,12 @@ TEST_CASE("[cpu/operator] ReLU(forward)") {
         });
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDatatype(DataType::Int32);
-        myReLU->getOperator()->setBackend("cpu");
-        myReLU->getOperator()->associateInput(0,input0);
-        myReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myReLU->forward();
-        REQUIRE(*myReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*op->getOutput(0) == *expectedOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3e25c28f9caac61c64d38fa70879af79d20392bc
--- /dev/null
+++ b/unit_tests/operator/Test_SliceImpl.cpp
@@ -0,0 +1,166 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Slice.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
+    SECTION("1D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
+            {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,4> {
+            {0, 1, 2,-3}
+        });
+
+        std::shared_ptr<Node> mySlice = Slice(0, {4});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
+        mySlice->forward();
+        // mySlice->getOperator()->output(0).print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("2D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
+            {
+                { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,3> {
+            {
+                {-5,-6, 7},
+                {-5,-6, 7}
+            }
+        });
+
+        std::shared_ptr<Node> mySlice = Slice(5, {2,3});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
+        mySlice->forward();
+        // mySlice->getOperator()->output(0).print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("3D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
+            {
+                {
+                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                },
+                {
+                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,1,3> {
+            {
+                {
+                    { 4,-5,-6}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> mySlice = Slice(14, {1,1,3});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
+        mySlice->forward();
+        // mySlice->getOperator()->output(0).print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+
+        std::shared_ptr<Node> mySlice = Slice(0, {2,2,2,10});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
+        mySlice->forward();
+        // mySlice->getOperator()->output(0).print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SoftmaxImpl.cpp b/unit_tests/operator/Test_SoftmaxImpl.cpp
index bad34102b589e3d73956f43593456d885373b3de..3d3c9fe4a0de0183e9069b814084aa80019adf0f 100644
--- a/unit_tests/operator/Test_SoftmaxImpl.cpp
+++ b/unit_tests/operator/Test_SoftmaxImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Softmax(forward)") {
+TEST_CASE("[cpu/operator] Softmax(forward)", "[Softmax][CPU]") {
     SECTION("2D Tensor") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,10> {
             {
@@ -40,13 +40,14 @@ TEST_CASE("[cpu/operator] Softmax(forward)") {
         });
 
         std::shared_ptr<Node> mySoftmax = Softmax();
-        mySoftmax->getOperator()->setDatatype(DataType::Float32);
-        mySoftmax->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator());
         mySoftmax->getOperator()->associateInput(0,input);
-        mySoftmax->getOperator()->computeOutputDims();
+        mySoftmax->getOperator()->setDataType(DataType::Float32);
+        mySoftmax->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySoftmax->forward();
 
-        float* resPtr = static_cast<float*>(mySoftmax->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 20; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -108,13 +109,14 @@ TEST_CASE("[cpu/operator] Softmax(forward)") {
         });
 
         std::shared_ptr<Node> mySoftmax = Softmax();
-        mySoftmax->getOperator()->setDatatype(DataType::Float32);
-        mySoftmax->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator());
         mySoftmax->getOperator()->associateInput(0,input);
-        mySoftmax->getOperator()->computeOutputDims();
+        mySoftmax->getOperator()->setDataType(DataType::Float32);
+        mySoftmax->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySoftmax->forward();
 
-        float* resPtr = static_cast<float*>(mySoftmax->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 54; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_SqrtImpl.cpp b/unit_tests/operator/Test_SqrtImpl.cpp
index cf17499aba50359547218adc6b3938176e729ed3..653ecf0d04907ad8f7887e79cf149d79b37a9bbc 100644
--- a/unit_tests/operator/Test_SqrtImpl.cpp
+++ b/unit_tests/operator/Test_SqrtImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Sqrt(forward)") {
+TEST_CASE("[cpu/operator] Sqrt(forward)", "[Sqrt][CPU]") {
     SECTION("2D Tensor") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -36,13 +36,14 @@ TEST_CASE("[cpu/operator] Sqrt(forward)") {
         });
 
         std::shared_ptr<Node> mySqrt = Sqrt();
-        mySqrt->getOperator()->setDatatype(DataType::Float32);
-        mySqrt->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySqrt -> getOperator());
         mySqrt->getOperator()->associateInput(0,input);
-        mySqrt->getOperator()->computeOutputDims();
+        mySqrt->getOperator()->setDataType(DataType::Float32);
+        mySqrt->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySqrt->forward();
 
-        float* resPtr = static_cast<float*>(mySqrt->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -106,13 +107,14 @@ TEST_CASE("[cpu/operator] Sqrt(forward)") {
         });
 
         std::shared_ptr<Node> mySqrt = Sqrt();
-        mySqrt->getOperator()->setDatatype(DataType::Float32);
-        mySqrt->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySqrt -> getOperator());
         mySqrt->getOperator()->associateInput(0,input);
-        mySqrt->getOperator()->computeOutputDims();
+        mySqrt->getOperator()->setDataType(DataType::Float32);
+        mySqrt->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySqrt->forward();
 
-        float* resPtr = static_cast<float*>(mySqrt->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 54; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_SubImpl.cpp b/unit_tests/operator/Test_SubImpl.cpp
index d741602cf02958a88bb41bbd2927577027acb069..dfd64078b77a557e07eb11cb958ac24eeb1f9aa3 100644
--- a/unit_tests/operator/Test_SubImpl.cpp
+++ b/unit_tests/operator/Test_SubImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Sub(forward)") {
+TEST_CASE("[cpu/operator] Sub(forward)", "[Sub][CPU]") {
     SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -37,14 +37,15 @@ TEST_CASE("[cpu/operator] Sub(forward)") {
         });
 
         std::shared_ptr<Node> mySub = Sub();
-        mySub->getOperator()->setDatatype(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
         mySub->getOperator()->associateInput(0, input_1);
         mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->computeOutputDims();
+        mySub->getOperator()->setDataType(DataType::Float32);
+        mySub->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySub->forward();
 
-        float* resPtr = static_cast<float*>(mySub->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -73,14 +74,15 @@ TEST_CASE("[cpu/operator] Sub(forward)") {
         });
 
         std::shared_ptr<Node> mySub = Sub();
-        mySub->getOperator()->setDatatype(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
         mySub->getOperator()->associateInput(0, input_1);
         mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->computeOutputDims();
+        mySub->getOperator()->setDataType(DataType::Float32);
+        mySub->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySub->forward();
 
-        float* resPtr = static_cast<float*>(mySub->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -112,14 +114,15 @@ TEST_CASE("[cpu/operator] Sub(forward)") {
         });
 
         std::shared_ptr<Node> mySub = Sub();
-        mySub->getOperator()->setDatatype(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
         mySub->getOperator()->associateInput(0, input_1);
         mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->computeOutputDims();
+        mySub->getOperator()->setDataType(DataType::Float32);
+        mySub->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySub->forward();
 
-        float* resPtr = static_cast<float*>(mySub->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 12; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/recipies/Test_HorizontalTiling.cpp b/unit_tests/recipies/Test_HorizontalTiling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b71a01d130a783caf5c643dfb0c3757b1c524e5e
--- /dev/null
+++ b/unit_tests/recipies/Test_HorizontalTiling.cpp
@@ -0,0 +1,208 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/recipies/Recipies.hpp"
+#include "aidge/scheduler/Scheduler.hpp"
+#include "aidge/operator/Concat.hpp"
+
+
+namespace Aidge {
+
+TEST_CASE("[core/recipies] Tiling(transformation)", "[Tiling][Recipies]") {
+
+    SECTION("Transform a pre-generated GraphView") {
+
+        SECTION("Simple Node: Conv") {
+            std::shared_ptr<Node> myReLU = ReLU("myReLU");
+            std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv");
+            std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
+                {
+                    {
+                        {{  0,   1,   2},
+                         {  3,   4,   5},
+                         {  6,   7,   8}},
+                        {{  9,  10,  11},
+                         { 12,  13,  14},
+                         { 15,  16,  17}},
+                        {{ 18,  19,  20},
+                         { 21,  22,  23},
+                         { 24,  25,  26}}
+                    },
+                    {
+                        {{ 27,  28,  29},
+                         { 30,  31,  32},
+                         { 33,  34,  35}},
+                        {{ 36,  37,  38},
+                         { 39,  40,  41},
+                         { 42,  43,  44}},
+                        {{ 45,  46,  47},
+                         { 48,  49,  50},
+                         { 51,  52,  53}}
+                    },
+                    {
+                        {{ 54,  55,  56},
+                         { 57,  58,  59},
+                         { 60,  61,  62}},
+                        {{ 63,  64,  65},
+                         { 66,  67,  68},
+                         { 69,  70,  71}},
+                        {{ 72,  73,  74},
+                         { 75,  76,  77},
+                         { 78,  79,  80}}
+                    },
+                    {
+                        {{ 81,  82,  83},
+                         { 84,  85,  86},
+                         { 87,  88,  89}},
+                        {{ 90,  91,  92},
+                         { 93,  94,  95},
+                         { 96,  97,  98}},
+                        {{ 99, 100, 101},
+                         {102, 103, 104},
+                         {105, 106, 107}}
+                    }
+                }
+            });
+            std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
+            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
+                {
+                    {
+                        {{  0,   1,   2,   3,   4},
+                        {  5,   6,   7,   8,   9},
+                        { 10,  11,  12,  13,  14},
+                        { 15,  16,  17,  18,  19},
+                        { 20,  21,  22,  23,  24}},
+
+                        {{ 25,  26,  27,  28,  29},
+                        { 30,  31,  32,  33,  34},
+                        { 35,  36,  37,  38,  39},
+                        { 40,  41,  42,  43,  44},
+                        { 45,  46,  47,  48,  49}},
+
+                        {{ 50,  51,  52,  53,  54},
+                        { 55,  56,  57,  58,  59},
+                        { 60,  61,  62,  63,  64},
+                        { 65,  66,  67,  68,  69},
+                        { 70,  71,  72,  73,  74}}
+                    },
+                    {
+                        {{ 75,  76,  77,  78,  79},
+                        { 80,  81,  82,  83,  84},
+                        { 85,  86,  87,  88,  89},
+                        { 90,  91,  92,  93,  94},
+                        { 95,  96,  97,  98,  99}},
+
+                        {{100, 101, 102, 103, 104},
+                        {105, 106, 107, 108, 109},
+                        {110, 111, 112, 113, 114},
+                        {115, 116, 117, 118, 119},
+                        {120, 121, 122, 123, 124}},
+
+                        {{125, 126, 127, 128, 129},
+                        {130, 131, 132, 133, 134},
+                        {135, 136, 137, 138, 139},
+                        {140, 141, 142, 143, 144},
+                        {145, 146, 147, 148, 149}}
+                    }
+                }
+            });
+            std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
+                {
+                    {
+                        {{ 15226,  15577,  15928},
+                         { 16981,  17332,  17683},
+                         { 18736,  19087,  19438}},
+
+                        {{ 37818,  38898,  39978},
+                         { 43218,  44298,  45378},
+                         { 48618,  49698,  50778}},
+
+                        {{ 60426,  62235,  64044},
+                         { 69471,  71280,  73089},
+                         { 78516,  80325,  82134}},
+
+                        {{ 83016,  85554,  88092},
+                         { 95706,  98244, 100782},
+                         {108396, 110934, 113472}}
+                    },
+                    {
+                        {{ 41551,  41902,  42253},
+                         { 43306,  43657,  44008},
+                         { 45061,  45412,  45763}},
+
+                        {{118818, 119898, 120978},
+                         {124218, 125298, 126378},
+                         {129618, 130698, 131778}},
+
+                        {{196101, 197910, 199719},
+                         {205146, 206955, 208764},
+                         {214191, 216000, 217809}},
+
+                        {{273366, 275904, 278442},
+                         {286056, 288594, 291132},
+                         {298746, 301284, 303822}}
+                    }
+                }
+            });
+            myReLU->getOperator()->associateInput(0, myInput);
+            myReLU->addChild(myConv, 0, 0);
+            myConv->getOperator()->setInput(1, myWeights);
+            myConv->getOperator()->setInput(2, myBias);
+            std::dynamic_pointer_cast<Conv_Op<2>>(myConv->getOperator())->computeOutputDims();
+
+            std::shared_ptr<GraphView> g = std::make_shared<GraphView>();
+            g->add({myReLU, myConv});
+            g->compile("cpu", DataType::Int32);
+            std::set<std::shared_ptr<Node>> tiledConv = getConvHorizontalTiling(myConv, 2, 3);
+
+            SequentialScheduler s(g);
+            s.forward();
+            REQUIRE(*(std::dynamic_pointer_cast<Conv_Op<2>>(myConv->getOperator())->getOutput(0)) == *myOutput);
+
+            GraphView::replace({myConv, myConv->getParent(1), myConv->getParent(2)}, tiledConv);
+            g->compile("cpu", DataType::Int32);
+            s.resetScheduling();
+            s.forward();
+
+            REQUIRE(*(std::dynamic_pointer_cast<OperatorTensor>((*g->outputNodes().begin())->getOperator())->getOutput(0)) == *myOutput);
+        }
+    }
+}
+}
+        // std::shared_ptr<GraphView> g = Sequential({
+        //     Conv(3, 16, {3,3}, "conv1"),
+        //     ReLU("relu1"),
+        //     Conv(16, 32, {1,1}, "conv2"),
+        //     Conv(32, 16, {1,1}, "conv3"),
+        //     Conv(16, 10, {3,3}, "conv4"),
+        //     ReLU("relu2")
+        // });
+
+    //     for (auto& individualConv : g->match("Conv")) {
+    //         auto tiledConv = horizontalTiling(individualConv);
+    //         g->replace(individualConv, tiledConv);
+    //     }
+    // }
+
+    // SECTION("Create the GraphView with tiled layers") {
+    //     std::shared_ptr<GraphView> g;
+    //     g->addChild(horizontalTiling(Conv()))
+    // }
+
+// }
+// } // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index 78ab8d5b149e8f702558658fef0442f225de3813..8ea8e726f286035a1059a317471b893ce4639251 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -50,13 +50,11 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
                     Conv(1, 3, {3, 3}, "conv1"),
                     Conv(3, 4, {1, 1}, "conv2"),
                     Conv(4, 3, {1, 1}, "conv3"),
-                    FC(5, false, "fc")});
-        g->setDatatype(Aidge::DataType::Int32);
-        g->setBackend("cpu");
+                    FC(27, 5, false, "fc")});
 
-        g->getNode("conv1")->getOperator()->input(0) = *inputTensor;
-        g->getNode("conv1")->getOperator()->input(1) = *weight1;
-        g->getNode("conv1")->getOperator()->input(2) = *bias1;
+        g->getNode("conv1")->getOperator()->setInput(0, inputTensor);
+        g->getNode("conv1")->getOperator()->setInput(1, weight1);
+        g->getNode("conv1")->getOperator()->setInput(2, bias1);
 
         std::shared_ptr<Tensor> weight2 =
                 std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}},
@@ -64,8 +62,8 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
                                                                    {{{7}}, {{8}}, {{9}}},
                                                                    {{{10}}, {{11}}, {{12}}}}});
         std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}});
-        g->getNode("conv2")->getOperator()->input(1) = *weight2;
-        g->getNode("conv2")->getOperator()->input(2) = *bias2;
+        g->getNode("conv2")->getOperator()->setInput(1, weight2);
+        g->getNode("conv2")->getOperator()->setInput(2, bias2);
         // *(g->getNode("conv2")->getOperator()->input(1, weight2);
 
         std::shared_ptr<Tensor> weight3 = std::make_shared<Tensor>(
@@ -73,8 +71,8 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
                                           {{{5}}, {{6}}, {{7}}, {{8}}},
                                           {{{9}}, {{10}}, {{11}}, {{12}}}}});
         std::shared_ptr<Tensor> bias3 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
-        g->getNode("conv3")->getOperator()->input(1) = *weight3;
-        g->getNode("conv3")->getOperator()->input(2) = *bias3;
+        g->getNode("conv3")->getOperator()->setInput(1, weight3);
+        g->getNode("conv3")->getOperator()->setInput(2, bias3);
 
         std::shared_ptr<Tensor> weightfc = std::make_shared<Tensor>(
                 Array2D<int, 5, 27>{{{1,  2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
@@ -88,10 +86,12 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
                                      {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2,
                                       3, 4, 5, 6, 7, 8, 9,  10, 11, 12, 13, 14, 15}}});
         std::shared_ptr<Tensor> biasfc = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
-        g->getNode("fc")->getOperator()->input(1) = *weightfc;
-        g->getNode("fc")->getOperator()->input(2) = *biasfc;
+        g->getNode("fc")->getOperator()->setInput(1, weightfc);
+        g->getNode("fc")->getOperator()->setInput(2, biasfc);
 
         // input->addChild(g);
+        g->setDataType(Aidge::DataType::Int32);
+        g->setBackend("cpu");
         g->forwardDims();
         SequentialScheduler scheduler(g);
         REQUIRE_NOTHROW(scheduler.forward());
@@ -126,17 +126,17 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
         Tensor expectedOutput4 = Array2D<int, 2, 5>{
                 {{205050376, 198925904, 181355097, 196978090, 238868348},
                 {598467376, 561797804, 560823897, 593043790, 698672948}}};
-        Tensor other1 = g->getNode("conv1")->getOperator()->output(0);
-        bool equal1 = (other1 == *expectedOutput1);
+        std::shared_ptr<Tensor> other1 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv1")->getOperator())->getOutput(0);
+        bool equal1 = (*other1 == *expectedOutput1);
         REQUIRE(equal1);
-        Tensor other2 = g->getNode("conv2")->getOperator()->output(0);
-        bool equal2 = (other2 == *expectedOutput2);
+        std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0);
+        bool equal2 = (*other2 == *expectedOutput2);
         REQUIRE(equal2);
-        Tensor other3 = g->getNode("conv3")->getOperator()->output(0);
-        bool equal3 = (other3 == *expectedOutput3);
+        std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0);
+        bool equal3 = (*other3 == *expectedOutput3);
         REQUIRE(equal3);
-        Tensor other4 = g->getNode("fc")->getOperator()->output(0);
-        bool equal4 = (other4 == expectedOutput4);
+        std::shared_ptr<Tensor> other4 = std::static_pointer_cast<OperatorTensor>(g->getNode("fc")->getOperator())->getOutput(0);
+        bool equal4 = (*other4 == expectedOutput4);
         REQUIRE(equal4);
     }
 
@@ -147,36 +147,34 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
                                 Conv(3, 3, {1, 1}, "conv1.1"),
                                 Conv(3, 3, {1, 1}, "conv1.2"),
                                 Conv(3, 3, {1, 1}, "conv1.3")}),
-                            Add<3>("add1"),
+                            Add(3, "add1"),
                             Conv(3, 2, {1, 1}, "conv2"),
-                            FC(5, false, "out")});
-        g->setBackend("cpu");
-        g->setDatatype(Aidge::DataType::Int32);
+                            FC(18, 5, false, "out")});
 
-        g->getNode("inputConv")->getOperator()->input(0) = *inputTensor;
-        g->getNode("inputConv")->getOperator()->input(1) = *weight1;
-        g->getNode("inputConv")->getOperator()->input(2) = *bias1;
+        g->getNode("inputConv")->getOperator()->setInput(0, inputTensor);
+        g->getNode("inputConv")->getOperator()->setInput(1, weight1);
+        g->getNode("inputConv")->getOperator()->setInput(2, bias1);
 
         std::shared_ptr<Tensor> conv11Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
                 {{{{1}}, {{2}}, {{3}}}, {{{4}}, {{5}}, {{6}}}, {{{7}}, {{8}}, {{9}}}}});
-        g->getNode("conv1.1")->getOperator()->input(1) = *conv11Weight;
-        g->getNode("conv1.1")->getOperator()->input(2) = *bias1;
+        g->getNode("conv1.1")->getOperator()->setInput(1, conv11Weight);
+        g->getNode("conv1.1")->getOperator()->setInput(2, bias1);
 
         std::shared_ptr<Tensor> conv12Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
                 {{{{11}}, {{12}}, {{13}}}, {{{14}}, {{15}}, {{16}}}, {{{17}}, {{18}}, {{19}}}}});
-        g->getNode("conv1.2")->getOperator()->input(1) = *conv12Weight;
-        g->getNode("conv1.2")->getOperator()->input(2) = *bias1;
+        g->getNode("conv1.2")->getOperator()->setInput(1, conv12Weight);
+        g->getNode("conv1.2")->getOperator()->setInput(2, bias1);
 
         std::shared_ptr<Tensor> conv13Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
                 {{{{21}}, {{22}}, {{23}}}, {{{24}}, {{25}}, {{26}}}, {{{27}}, {{28}}, {{29}}}}});
-        g->getNode("conv1.3")->getOperator()->input(1) = *conv13Weight;
-        g->getNode("conv1.3")->getOperator()->input(2) = *bias1;
+        g->getNode("conv1.3")->getOperator()->setInput(1, conv13Weight);
+        g->getNode("conv1.3")->getOperator()->setInput(2, bias1);
 
         std::shared_ptr<Tensor> conv2Weight = std::make_shared<Tensor>(
                 Array4D<int, 2, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}}, {{{4}}, {{5}}, {{6}}}}});
         std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 2>{{1, 2}});
-        g->getNode("conv2")->getOperator()->input(1) = *conv2Weight;
-        g->getNode("conv2")->getOperator()->input(2) = *bias2;
+        g->getNode("conv2")->getOperator()->setInput(1, conv2Weight);
+        g->getNode("conv2")->getOperator()->setInput(2, bias2);
 
         std::shared_ptr<Tensor> fcWeight = std::make_shared<Tensor>(
                 Array2D<int, 5, 18>{{{1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3},
@@ -185,19 +183,21 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
                                      {5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2},
                                      {3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}}});
         std::shared_ptr<Tensor> fcBias = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
-        g->getNode("out")->getOperator()->input(1) = *fcWeight;
-        g->getNode("out")->getOperator()->input(2) = *fcBias;
+        g->getNode("out")->getOperator()->setInput(1, fcWeight);
+        g->getNode("out")->getOperator()->setInput(2, fcBias);
 
         std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(
                 Array2D<int, 2, 5>{{{124324368, 130692907, 133325056, 125044620, 142843879},
                                     {369195468, 394615207, 382643056, 379441320, 416291779}}});
 
+        g->setBackend("cpu");
+        g->setDataType(Aidge::DataType::Int32);
         g->forwardDims();
         SequentialScheduler scheduler(g);
         REQUIRE_NOTHROW(scheduler.forward());
         scheduler.saveSchedulingDiagram("schedulingSequential");
         std::shared_ptr<Tensor> result =
-                std::static_pointer_cast<Tensor>(g->getNode("out")->getOperator()->getOutput(0));
+                std::static_pointer_cast<Tensor>(g->getNode("out")->getOperator()->getRawOutput(0));
         bool equal = (*result == *expectedOutput);
         REQUIRE(equal);
     }