diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 6cb72e9100b1437afa13a23cb5933e77aabaaae8..7a1497a2f4a2ae0e6005897ae504502505bbe60a 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -12,16 +12,17 @@
 #ifndef AIDGE_CPU_OPERATOR_ADDIMPL_H_
 #define AIDGE_CPU_OPERATOR_ADDIMPL_H_
 
+#include <cstddef>  // std::size_t
+#include <memory>   // std::unique_ptr, std::make_unique
+#include <string>
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
-// class Add_Op<2>;
 
 // compute kernel registry for forward and backward
 class AddImplForward_cpu
@@ -33,7 +34,7 @@ class AddImplBackward_cpu
 
 class AddImpl_cpu : public OperatorImpl {
 public:
-    AddImpl_cpu(const Add_Op& op) : OperatorImpl(op) {}
+    AddImpl_cpu(const Add_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<AddImpl_cpu> create(const Add_Op& op) {
         return std::make_unique<AddImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index 38dbd4b528b4f0fbd24f7f8d2b53e7ea16bae5d0..ce126dc2b870d6ac767c15bc6fbca2deb07e8772 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -38,7 +38,7 @@ class AvgPoolingImpl2DBackward_cpu
 
 class AvgPoolingImpl2D_cpu : public OperatorImpl {
 public:
-    AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op) {}
+    AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<AvgPoolingImpl2D_cpu> create(const AvgPooling_Op<2> &op) {
         return std::make_unique<AvgPoolingImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index 92797ab09148f16255851f4bf51d7c62b7bd6f70..8bd567dab3d564ccdeffdc581585e404fc4697a4 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -53,7 +53,7 @@ class BatchNormImpl2DBackward_cpu
 
 class BatchNormImpl2D_cpu : public OperatorImpl {
 public:
-    BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op) {}
+    BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<BatchNormImpl2D_cpu> create(const BatchNorm_Op<2> &op) {
         return std::make_unique<BatchNormImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ConcatImpl.hpp b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
index 02d52c850a5a3e628980fcc7502ffab8aa166e17..a997ffa9860f87fe0d9bc4e64239a656053416a6 100644
--- a/include/aidge/backend/cpu/operator/ConcatImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConcatImpl.hpp
@@ -41,7 +41,7 @@ class ConcatImplBackward_cpu
 
 class ConcatImpl_cpu : public OperatorImpl {
 public:
-    ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op) {}
+    ConcatImpl_cpu(const Concat_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ConcatImpl_cpu> create(const Concat_Op& op) {
         return std::make_unique<ConcatImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index 44bc5da3fa752d9fd52e43366099d20de35d866e..a61a7299ed6bd5c5a3e41c09e9d5b5f1f7ae3326 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -40,7 +40,7 @@ class ConvDepthWiseImpl2DBackward_cpu
 
 class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
 public:
-    ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op) {}
+    ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ConvDepthWiseImpl2D_cpu> create(const ConvDepthWise_Op<2> &op) {
         return std::make_unique<ConvDepthWiseImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index 2915210dbdeb9b32aca006a171efbca9ccc288b5..e7ce0892a6241009a8e80821e341b3209a19faa4 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -40,7 +40,7 @@ class ConvImpl2DBackward_cpu
 
 class ConvImpl2D_cpu : public OperatorImpl {
    public:
-    ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op) {}
+    ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ConvImpl2D_cpu> create(const Conv_Op<2> &op) {
         return std::make_unique<ConvImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 6bedf627548f63cf14626c69bf91fbd8c9434784..3a19d7303464e3543bd1ce83e334c4a6bdb713a2 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -34,7 +34,7 @@ class DivImplBackward_cpu
 
 class DivImpl_cpu : public OperatorImpl {
 public:
-    DivImpl_cpu(const Div_Op& op) : OperatorImpl(op) {}
+    DivImpl_cpu(const Div_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<DivImpl_cpu> create(const Div_Op& op) {
         return std::make_unique<DivImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ErfImpl.hpp b/include/aidge/backend/cpu/operator/ErfImpl.hpp
index 517eab354a7f44f1d4c7ebbc33efe12edd4159d1..6864803a542e4beed0259be9c4722d4215bec449 100644
--- a/include/aidge/backend/cpu/operator/ErfImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ErfImpl.hpp
@@ -32,7 +32,7 @@ class ErfImplBackward_cpu
 
 class ErfImpl_cpu : public OperatorImpl {
 public:
-    ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op) {}
+    ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ErfImpl_cpu> create(const Erf_Op& op) {
         return std::make_unique<ErfImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index 86bb7fd1271e5857b595dda8efc0354851c94b7e..fedd8b38b2dbee9e5fd288a07d5cd722470723e5 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -26,23 +26,42 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class FCImplForward_cpu : public Registrable<FCImplForward_cpu,
-                                                 std::tuple<DataType, DataType, DataType, DataType>,
-                                                 void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t,
-                                                      const void *, const void *, const void *, void *)> {};
+                                             std::tuple<DataType,
+                                                        DataType,
+                                                        DataType,
+                                                        DataType>,
+                                             void(const FC_Op::Attrs&,
+                                                  const DimSize_t,
+                                                  const DimSize_t,
+                                                  const void *,
+                                                  const void *,
+                                                  const void *,
+                                                  void *)> {};
 class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
-                                                  std::tuple<DataType, DataType, DataType, DataType>,
-                                                  void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t,
-                                                       const void *, const void *, const void *, void *)> {};
+                                              std::tuple<DataType,
+                                                         DataType,
+                                                         DataType,
+                                                         DataType>,
+                                              void(const FC_Op::Attrs&,
+                                                   const DimSize_t,
+                                                   const DimSize_t,
+                                                   const void *,
+                                                   const void *,
+                                                   const void *,
+                                                   void *,
+                                                   void *,
+                                                   void *)> {};
 
 class FCImpl_cpu : public OperatorImpl {
 public:
-    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op) {}
+    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) {
         return std::make_unique<FCImpl_cpu>(op);
     }
 
-    void forward() override;
+    void forward() override final;
+    void backward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..50fb5f49033cccd3c554d692bc336c7d5d677384
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+#include <algorithm>
+
+#include "aidge/backend/cpu/operator/FCImpl.hpp"
+
+namespace Aidge {
+template <class I, class O, class W, class B>
+void FCImpl_cpu_backward_kernel(const FC_Op::Attrs& attrs, const DimSize_t batchSize, const DimSize_t oneInputSize,
+                                const void* input_, const void* originalInput_, const void* weight_,
+                                void* output_, void* weightGrad_, void* biasesGrad_) {
+    // FIXME: missing FC attributes as arguments
+    const I* input  = static_cast<const I*>(input_);
+    const I* originalInput  = static_cast<const I*>(originalInput_);
+    const W* weight = static_cast<const W*>(weight_);
+    O* output       = static_cast<O*>(output_);
+    W* weightGrad   = static_cast<W*>(weightGrad_);
+    B* biasesGrad   = static_cast<B*>(biasesGrad_);
+
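+    // Given the indexing below, "input" holds the gradient coming back from
+    // the next layer (dL/dY, of shape batchSize x nbOutputs), "originalInput"
+    // is the forward-pass input X, and "output" receives the gradient with
+    // respect to that input (dL/dX). std::get<0>(attrs) is the number of
+    // outputs, std::get<1>(attrs) the no-bias flag.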
+    // bias gradient: dB[o] = sum over the batch of dY[b, o] (zero-filled when the no-bias flag is set)
+    if (std::get<1>(attrs)) { // no bias
+        std::fill(biasesGrad, biasesGrad + std::get<0>(attrs), B(0));
+    } else {
+        for (std::size_t o = 0; o < std::get<0>(attrs); ++o) { // nb outputs
+            B sum{0};
+            for (std::size_t b = 0; b < batchSize; ++b) {
+                sum += input[b*std::get<0>(attrs) + o];
+            }
+            biasesGrad[o] = sum;
+        }
+    }
+
+    // weight gradient: dW[o, c] = sum over the batch of X[b, c] * dY[b, o]
+    for (std::size_t o = 0; o < std::get<0>(attrs); ++o) {
+        for (std::size_t c = 0; c < oneInputSize; ++c) {
+            W sum{0};
+            for (std::size_t b = 0; b < batchSize; ++b) {
+                sum += originalInput[b*oneInputSize + c]*input[b*std::get<0>(attrs) + o];
+            }
+            weightGrad[o*oneInputSize + c] = sum;
+        }
+    }
+
+    // input gradient: dX[b, c] = sum over the outputs of W[o, c] * dY[b, o]
+    for (std::size_t b = 0; b < batchSize; ++b) {
+        for (std::size_t c = 0; c < oneInputSize; ++c) {
+            O sum{0};
+            for (std::size_t o = 0; o < std::get<0>(attrs); ++o) {
+                sum += weight[o*oneInputSize + c] * input[b*std::get<0>(attrs) + o];
+            }
+            output[b*oneInputSize + c] = sum;
+        }
+    }
+}
+
+
+namespace {
+static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Float32(
+        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
+        Aidge::FCImpl_cpu_backward_kernel<float, float, float, float>);
+static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Int32(
+        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
+        Aidge::FCImpl_cpu_backward_kernel<int, int, int, int>);
+static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Float64(
+        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
+        Aidge::FCImpl_cpu_backward_kernel<double, double, double, double>);
+}  // namespace
+
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/GatherImpl.hpp b/include/aidge/backend/cpu/operator/GatherImpl.hpp
index 28c9a31db337977405b66cbca61d950160679fa1..2164f6c4f26dca64c672f62bc8fdc0895c642ae4 100644
--- a/include/aidge/backend/cpu/operator/GatherImpl.hpp
+++ b/include/aidge/backend/cpu/operator/GatherImpl.hpp
@@ -32,7 +32,7 @@ class GatherImplBackward_cpu
 
 class GatherImpl_cpu : public OperatorImpl {
 public:
-    GatherImpl_cpu(const Gather_Op& op) : OperatorImpl(op) {}
+    GatherImpl_cpu(const Gather_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<GatherImpl_cpu> create(const Gather_Op& op) {
         return std::make_unique<GatherImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index b60143dba18a11f7521f265ca0816984b67c6920..880a59b3aeae2598f6b1ed5e287af18fd7bcfd6f 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -12,17 +12,17 @@
 #ifndef AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_
 #define AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_
 
+#include <memory>
+#include <tuple>
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
-// class LeakyReLU_Op;
-
 // compute kernel registry for forward and backward
 class LeakyReLUImplForward_cpu
     : public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Attrs&, std::size_t, const void*, void*)> {
@@ -33,14 +33,17 @@ class LeakyReLUImplBackward_cpu
 
 class LeakyReLUImpl_cpu : public OperatorImpl {
 public:
-    LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op) {}
+    LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<LeakyReLUImpl_cpu> create(const LeakyReLU_Op& op) {
         return std::make_unique<LeakyReLUImpl_cpu>(op);
     }
 
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+
+    void forward() override final;
+
+    void backward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..949e6af66a476693b347f38a45edea10e21bc933
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void LeakyReLUImpl_cpu_backward_kernel(const LeakyReLU_Op::Attrs& attrs,
+                                       std::size_t inputLength,
+                                       const void* input_,
+                                       void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+    I negativeSlope = static_cast<I>(std::get<0>(attrs));
+
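+    // Elementwise LeakyReLU rule applied to the incoming tensor: positive
+    // values pass through unchanged, the rest are scaled by negativeSlope.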
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        output[i] = input[i] > 0 ? input[i] : negativeSlope*input[i];
+    }
+}
+
+namespace {
+static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::LeakyReLUImpl_cpu_backward_kernel<float, float>);
+static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::LeakyReLUImpl_cpu_backward_kernel<int, int>);
+static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::LeakyReLUImpl_cpu_backward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index 437ba404b1cc39973448f3c5567aec2fe35994e3..e4b76d64baadbcb1baa7d24180c4bb13ed47215b 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -35,7 +35,7 @@ class MatMulImplBackward_cpu
 
 class MatMulImpl_cpu : public OperatorImpl {
 public:
-    MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op) {}
+    MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op) {
         return std::make_unique<MatMulImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
index 675f3c4a030a4f668da63fd10f9dc91d39e524dd..d2d30aa7db5b1522712faa846ef33e1b21772d5e 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
@@ -38,7 +38,7 @@ class MaxPoolingImpl2DBackward_cpu
 
 class MaxPoolingImpl2D_cpu : public OperatorImpl {
 public:
-    MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op) {}
+    MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MaxPoolingImpl2D_cpu> create(const MaxPooling_Op<2> &op) {
         return std::make_unique<MaxPoolingImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MemorizeImpl.hpp b/include/aidge/backend/cpu/operator/MemorizeImpl.hpp
index af571a0cd49f80dd6c9a3abf87dae4ba586af5c4..5ea0c9d4f3802490e5b41b5ea1c8454c87c65b28 100644
--- a/include/aidge/backend/cpu/operator/MemorizeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MemorizeImpl.hpp
@@ -23,7 +23,7 @@
 namespace Aidge {
 class MemorizeImpl_cpu : public OperatorImpl {
 public:
-    MemorizeImpl_cpu(const Memorize_Op& op) : OperatorImpl(op) {}
+    MemorizeImpl_cpu(const Memorize_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MemorizeImpl_cpu> create(const Memorize_Op& op) {
         return std::make_unique<MemorizeImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
index 6773b6f42497977679e5b6590c699aaf877bc3fc..2d42194c417bd7d57c00f4325a4585cf59d95b24 100644
--- a/include/aidge/backend/cpu/operator/MulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -33,7 +33,7 @@ class MulImplBackward_cpu
 
 class MulImpl_cpu : public OperatorImpl {
 public:
-    MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op) {}
+    MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<MulImpl_cpu> create(const Mul_Op& op) {
         return std::make_unique<MulImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index 41032c7220411b29de828763499c8bb751805369..b3c91a43419e9a5e9e1299f4a2118a51b6b64fc7 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -40,7 +40,7 @@ class PadImpl2DBackward_cpu
 
 class PadImpl2D_cpu : public OperatorImpl {
 public:
-    PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op) {}
+    PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<PadImpl2D_cpu> create(const Pad_Op<2> &op) {
         return std::make_unique<PadImpl2D_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/PopImpl.hpp b/include/aidge/backend/cpu/operator/PopImpl.hpp
index d7e484a509c05e5d0e2796542d6a0a8d5acdd3a7..19d5903973da378ce003daf4de9e1ae54d7b1b0e 100644
--- a/include/aidge/backend/cpu/operator/PopImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PopImpl.hpp
@@ -33,7 +33,7 @@ class PopImplBackward_cpu
 
 class PopImpl_cpu : public OperatorImpl {
 public:
-    PopImpl_cpu(const Pop_Op& op) : OperatorImpl(op) {}
+    PopImpl_cpu(const Pop_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<PopImpl_cpu> create(const Pop_Op& op) {
         return std::make_unique<PopImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index 7d17b370dd53817fd5ed61cd21d527e2850d0125..514e63af5ae5d1d1d00f7f328f5367df2bfa163d 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -33,7 +33,7 @@ class PowImplBackward_cpu
 
 class PowImpl_cpu : public OperatorImpl {
 public:
-    PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op) {}
+    PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<PowImpl_cpu> create(const Pow_Op& op) {
         return std::make_unique<PowImpl_cpu>(op);
@@ -41,6 +41,7 @@ public:
 
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
     void forward() override;
+    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index d8f8272ff09ec49924fe47825f56ee72faf4a644..cef82482813757312c638aebac9f2afd738493db 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -12,13 +12,15 @@
 #ifndef AIDGE_CPU_OPERATOR_RELUIMPL_H_
 #define AIDGE_CPU_OPERATOR_RELUIMPL_H_
 
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <tuple>    // std::tuple
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
 // class ReLU_Op;
@@ -33,14 +35,17 @@ class ReLUImplBackward_cpu
 
 class ReLUImpl_cpu : public OperatorImpl {
 public:
-    ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op) {}
+    ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ReLUImpl_cpu> create(const ReLU_Op& op) {
         return std::make_unique<ReLUImpl_cpu>(op);
     }
 
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+
+    void forward() override final;
+
+    void backward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b68ea076cb94eb9550b4a7af89ef58162ee15aea
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_
+
+#include <cstddef>  // std::size_t
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/ReLUImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void ReLUImpl_cpu_backward_kernel(const std::size_t inputLength,
+                                  const void* input_,
+                                  void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
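+    // Elementwise rule: positive values are forwarded, everything else maps to O(0).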
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        output[i] = (input[i] > I(0)) ? static_cast<O>(input[i]) : O(0);
+    }
+}
+
+namespace {
+static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ReLUImpl_cpu_backward_kernel<float, float>);
+static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ReLUImpl_cpu_backward_kernel<int, int>);
+static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::ReLUImpl_cpu_backward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
index 3c0fe6370ffdcdeff0702d9dbdff64b8297fd61e..7355a2bd46f45ab5019a31832001ae3335c1d8e8 100644
--- a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
@@ -25,76 +25,99 @@
 namespace Aidge {
 // class ReduceMean_Op;
 
-// compute kernel registry for forward and backward
-// DIM 1
-class ReduceMeanImpl1DForward_cpu
-    : public Registrable<ReduceMeanImpl1DForward_cpu,
+// ReduceMean_Op is no longer templated over the number of reduced axes:
+// a single registry covers every rank, with the axes read from the attributes.
+class ReduceMeanImplForward_cpu
+    : public Registrable<ReduceMeanImplForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+                         void(const ReduceMean_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
 class ReduceMeanImpl1DBackward_cpu
     : public Registrable<ReduceMeanImpl1DBackward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+                         void(const ReduceMean_Op::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
 
-// DIM 2
-class ReduceMeanImpl2DForward_cpu
-    : public Registrable<ReduceMeanImpl2DForward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-class ReduceMeanImpl2DBackward_cpu
-    : public Registrable<ReduceMeanImpl2DBackward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
-// DIM 3
-class ReduceMeanImpl3DForward_cpu
-    : public Registrable<ReduceMeanImpl3DForward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-class ReduceMeanImpl3DBackward_cpu
-    : public Registrable<ReduceMeanImpl3DBackward_cpu,
-                         std::tuple<DataType, DataType>,
-                         void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-
-class ReduceMeanImpl1D_cpu : public OperatorImpl {
-   public:
-    ReduceMeanImpl1D_cpu(const ReduceMean_Op<1>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<ReduceMeanImpl1D_cpu> create(const ReduceMean_Op<1> &op) {
-        return std::make_unique<ReduceMeanImpl1D_cpu>(op);
-    }
-
-   public:
-    void forward() override;
-};
-
-class ReduceMeanImpl2D_cpu : public OperatorImpl {
+class ReduceMeanImpl_cpu : public OperatorImpl {
    public:
-    ReduceMeanImpl2D_cpu(const ReduceMean_Op<2>& op) : OperatorImpl(op) {}
+    ReduceMeanImpl_cpu(const ReduceMean_Op& op) : OperatorImpl(op, "cpu") {}
 
-    static std::unique_ptr<ReduceMeanImpl2D_cpu> create(const ReduceMean_Op<2> &op) {
-        return std::make_unique<ReduceMeanImpl2D_cpu>(op);
+    static std::unique_ptr<ReduceMeanImpl_cpu> create(const ReduceMean_Op &op) {
+        return std::make_unique<ReduceMeanImpl_cpu>(op);
     }
 
    public:
     void forward() override;
 };
 
-class ReduceMeanImpl3D_cpu : public OperatorImpl {
-   public:
-    ReduceMeanImpl3D_cpu(const ReduceMean_Op<3>& op) : OperatorImpl(op) {}
-
-    static std::unique_ptr<ReduceMeanImpl3D_cpu> create(const ReduceMean_Op<3> &op) {
-        return std::make_unique<ReduceMeanImpl3D_cpu>(op);
-    }
-
-   public:
-    void forward() override;
-};
+// // compute kernel registry for forward and backward
+// // DIM 1
+// class ReduceMeanImpl1DForward_cpu
+//     : public Registrable<ReduceMeanImpl1DForward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// class ReduceMeanImpl1DBackward_cpu
+//     : public Registrable<ReduceMeanImpl1DBackward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+
+// // DIM 2
+// class ReduceMeanImpl2DForward_cpu
+//     : public Registrable<ReduceMeanImpl2DForward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// class ReduceMeanImpl2DBackward_cpu
+//     : public Registrable<ReduceMeanImpl2DBackward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+// // DIM 3
+// class ReduceMeanImpl3DForward_cpu
+//     : public Registrable<ReduceMeanImpl3DForward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// class ReduceMeanImpl3DBackward_cpu
+//     : public Registrable<ReduceMeanImpl3DBackward_cpu,
+//                          std::tuple<DataType, DataType>,
+//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+
+// class ReduceMeanImpl1D_cpu : public OperatorImpl {
+//    public:
+//     ReduceMeanImpl1D_cpu(const ReduceMean_Op<1>& op) : OperatorImpl(op, "cpu") {}
+
+//     static std::unique_ptr<ReduceMeanImpl1D_cpu> create(const ReduceMean_Op<1> &op) {
+//         return std::make_unique<ReduceMeanImpl1D_cpu>(op);
+//     }
+
+//    public:
+//     void forward() override;
+// };
+
+// class ReduceMeanImpl2D_cpu : public OperatorImpl {
+//    public:
+//     ReduceMeanImpl2D_cpu(const ReduceMean_Op<2>& op) : OperatorImpl(op, "cpu") {}
+
+//     static std::unique_ptr<ReduceMeanImpl2D_cpu> create(const ReduceMean_Op<2> &op) {
+//         return std::make_unique<ReduceMeanImpl2D_cpu>(op);
+//     }
+
+//    public:
+//     void forward() override;
+// };
+
+// class ReduceMeanImpl3D_cpu : public OperatorImpl {
+//    public:
+//     ReduceMeanImpl3D_cpu(const ReduceMean_Op<3>& op) : OperatorImpl(op, "cpu") {}
+
+//     static std::unique_ptr<ReduceMeanImpl3D_cpu> create(const ReduceMean_Op<3> &op) {
+//         return std::make_unique<ReduceMeanImpl3D_cpu>(op);
+//     }
+
+//    public:
+//     void forward() override;
+// };
 namespace {
-// add cpu backend to ReduceMean_Op<2> implementation registry
+// add cpu backend to ReduceMean_Op implementation registry
-static Registrar<ReduceMean_Op<1>> registrarReduceMeanImpl1D_cpu("cpu", Aidge::ReduceMeanImpl1D_cpu::create);
-static Registrar<ReduceMean_Op<2>> registrarReduceMeanImpl2D_cpu("cpu", Aidge::ReduceMeanImpl2D_cpu::create);
-static Registrar<ReduceMean_Op<3>> registrarReduceMeanImpl3D_cpu("cpu", Aidge::ReduceMeanImpl3D_cpu::create);
+static Registrar<ReduceMean_Op> registrarReduceMeanImpl_cpu("cpu", Aidge::ReduceMeanImpl_cpu::create);
+// static Registrar<ReduceMean_Op<1>> registrarReduceMeanImpl1D_cpu("cpu", Aidge::ReduceMeanImpl1D_cpu::create);
+// static Registrar<ReduceMean_Op<2>> registrarReduceMeanImpl2D_cpu("cpu", Aidge::ReduceMeanImpl2D_cpu::create);
+// static Registrar<ReduceMean_Op<3>> registrarReduceMeanImpl3D_cpu("cpu", Aidge::ReduceMeanImpl3D_cpu::create);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
index 46eb61f2f03acd47d74725ade1425a92f028690c..d7a967e84f53924a4b050ed79d1220f9bc79232e 100644
--- a/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp
@@ -12,10 +12,12 @@
 #ifndef AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_
 #define AIDGE_CPU_OPERATOR_REDUCEMEANIMPL_FORWARD_KERNEL_H_
 
-#include <cstddef>
-#include <algorithm>   // std::copy, std::for_each
-#include <numeric>     //std::accumulate
+#include <algorithm>   // std::for_each
+#include <cstddef>     // std::size_t
+#include <cstdint>     // std::int32_t
 #include <functional>  //std::multiplies
+#include <numeric>     //std::accumulate
+#include <vector>
 
 #include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
 #include "aidge/data/Data.hpp"
@@ -23,8 +25,8 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-template <class I, class O, DimSize_t DIM>
-void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs& attrs,
+template <class I, class O>
+void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op::Attrs& attrs,
                                      const std::vector<DimSize_t>& inputDims,
                                      const void* input_,
                                      void* output_) {
@@ -32,14 +34,15 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
+    const std::vector<std::int32_t>& axes = std::get<0>(attrs);
     const std::size_t nb_dims = inputDims.size();
     const std::size_t totalElements = std::accumulate(inputDims.cbegin(), inputDims.cend(), 1, std::multiplies<std::size_t>());
 
-    if (DIM == 1) {
-        const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>());
-        const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - std::get<0>(attrs)[0], 1, std::multiplies<std::size_t>());
+    if (axes.size() == 1) {
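+        // Fast path for a single reduced axis: stride_pre is the product of
+        // the dimensions before that axis, stride_post of those after it.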
+        const std::size_t stride_pre = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + axes[0], 1, std::multiplies<std::size_t>());
+        const std::size_t stride_post = std::accumulate(inputDims.crbegin(), inputDims.crbegin() + nb_dims -1 - axes[0], 1, std::multiplies<std::size_t>());
 
-        const std::size_t dim_i = inputDims[std::get<0>(attrs)[0]];
+        const std::size_t dim_i = inputDims[axes[0]];
         for (std::size_t pre = 0; pre < stride_pre; ++pre) {
             for (std::size_t post = 0; post < stride_post; ++post) {
                 const std::size_t idx_i = pre * dim_i * stride_post + post;
@@ -68,7 +71,7 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
         const I* inputAccumulation = input;
         I* outputAccumulation = nullptr;
 
-        for (const auto& axisInt : std::get<0>(attrs)) {
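+        // General path: reduce the axes one at a time, each pass accumulating
+        // into a freshly allocated intermediate buffer.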
+        for (const auto& axisInt : axes) {
             const std::size_t a = static_cast<std::size_t>(axisInt);
             outputElements /= inputDims[a];
             outputAccumulation = new I[outputElements];
@@ -93,7 +96,7 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
         // Copy elements from inputAccumulation to output while dividing by divisor
         I divisor = totalElements / outputElements;
         std::transform(inputAccumulation, inputAccumulation + outputElements, output,
-                    [divisor](int element) { return element / divisor; });
+                    [divisor](I element) { return element / divisor; });
         if (outputAccumulation) {
             delete[] outputAccumulation;
         }
@@ -103,29 +106,36 @@ void ReduceMeanImpl_cpu_forward_kernel(const typename ReduceMean_Op<DIM>::Attrs&
 }
 
 namespace {
-// DIM = 1
-static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,1>);
-static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,1>);
-static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,1>);
-
-// DIM = 2
-static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,2>);
-static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,2>);
-static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,2>);
-
-// DIM = 3
-static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,3>);
-static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,3>);
-static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,3>);
+static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float>);
+static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int>);
+static Registrar<ReduceMeanImplForward_cpu> registrarReduceMeanImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double>);
+
+// // DIM = 1
+// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float32(
+//         {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,1>);
+// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Int32(
+//         {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,1>);
+// static Registrar<ReduceMeanImpl1DForward_cpu> registrarReduceMeanImplForward_1D_cpu_Float64(
+//         {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,1>);
+
+// // DIM = 2
+// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float32(
+//         {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,2>);
+// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Int32(
+//         {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,2>);
+// static Registrar<ReduceMeanImpl2DForward_cpu> registrarReduceMeanImplForward_2D_cpu_Float64(
+//         {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,2>);
+
+// // DIM = 3
+// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float32(
+//         {DataType::Float32, DataType::Float32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<float, float,3>);
+// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Int32(
+//         {DataType::Int32, DataType::Int32}, Aidge::ReduceMeanImpl_cpu_forward_kernel<int, int,3>);
+// static Registrar<ReduceMeanImpl3DForward_cpu> registrarReduceMeanImplForward_3D_cpu_Float64(
+//         {DataType::Float64, DataType::Float64}, Aidge::ReduceMeanImpl_cpu_forward_kernel<double, double,3>);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/ReshapeImpl.hpp b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
index 0a8b851fd8acf14c35434887d054d530eb1228bc..1dc5fa2a09533494568ffea78153887d01368a7d 100644
--- a/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReshapeImpl.hpp
@@ -32,7 +32,7 @@ class ReshapeImplBackward_cpu
 
 class ReshapeImpl_cpu : public OperatorImpl {
 public:
-    ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op) {}
+    ReshapeImpl_cpu(const Reshape_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ReshapeImpl_cpu> create(const Reshape_Op& op) {
         return std::make_unique<ReshapeImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
index 29b61704f6acd85db1c635547e17f5f002e620f0..66bb42f7fb909ee9b6c91a6321ee3fa32c977626 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -34,7 +34,7 @@ class ScalingImplBackward_cpu
 
 class ScalingImpl_cpu : public OperatorImpl {
 public:
-    ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op) {}
+    ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) {
         return std::make_unique<ScalingImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
index a34650d6326331320c69befc790752cb4023e0ba..2e43023d678c8a4258c80fb91d82d2858fcdf188 100644
--- a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
@@ -33,7 +33,7 @@ class SigmoidImplBackward_cpu
 
 class SigmoidImpl_cpu : public OperatorImpl {
 public:
-    SigmoidImpl_cpu(const Sigmoid_Op& op) : OperatorImpl(op) {}
+    SigmoidImpl_cpu(const Sigmoid_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SigmoidImpl_cpu> create(const Sigmoid_Op& op) {
         return std::make_unique<SigmoidImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SliceImpl.hpp b/include/aidge/backend/cpu/operator/SliceImpl.hpp
index e129c2e680cbe9bf12ec97c347768e73b7775cf0..1583435c12a243ef5861299434a7fc1409307538 100644
--- a/include/aidge/backend/cpu/operator/SliceImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SliceImpl.hpp
@@ -40,7 +40,7 @@ class SliceImplBackward_cpu
 
 class SliceImpl_cpu : public OperatorImpl {
 public:
-    SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op) {}
+    SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SliceImpl_cpu> create(const Slice_Op& op) {
         return std::make_unique<SliceImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index 5625f7de7d65577c6829a1def514f8f69824dc9d..2b2fab485656efdc37ee134cb4ae574b6b403405 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -33,7 +33,7 @@ class SoftmaxImplBackward_cpu
 
 class SoftmaxImpl_cpu : public OperatorImpl {
 public:
-    SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op) {}
+    SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SoftmaxImpl_cpu> create(const Softmax_Op& op) {
         return std::make_unique<SoftmaxImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl.hpp b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
index f1848bde355c7b71e92395ef3901a69e7dca766f..1691d951678509274736d558360c8110958820a9 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
@@ -12,16 +12,17 @@
 #ifndef AIDGE_CPU_OPERATOR_SQRTIMPL_H_
 #define AIDGE_CPU_OPERATOR_SQRTIMPL_H_
 
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <tuple>
+#include <vector>
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include <memory>
-#include <vector>
 
 namespace Aidge {
-// class Sqrt_Op;
 
 // compute kernel registry for forward and backward
 class SqrtImplForward_cpu
@@ -33,14 +34,17 @@ class SqrtImplBackward_cpu
 
 class SqrtImpl_cpu : public OperatorImpl {
 public:
-    SqrtImpl_cpu(const Sqrt_Op& op) : OperatorImpl(op) {}
+    SqrtImpl_cpu(const Sqrt_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SqrtImpl_cpu> create(const Sqrt_Op& op) {
         return std::make_unique<SqrtImpl_cpu>(op);
     }
 
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+
+    void forward() override final;
+
+    void backward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9cf5118a5ac81520d7a180b6aba22417ca512890
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SQRTIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SQRTIMPL_BACKWARD_KERNEL_H_
+
+#include <cmath>    // std::sqrt
+#include <cstddef>  // std::size_t
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/SqrtImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void SqrtImpl_cpu_backward_kernel(const std::size_t inputLength,
+                                  const void* input_,
+                                  void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
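+    // d(sqrt(x))/dx = 1 / (2 * sqrt(x)); the square root is evaluated in
+    // single precision because of the float cast.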
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        output[i] = static_cast<O>(0.5/(std::sqrt(static_cast<float>(input[i]))));
+    }
+}
+
+namespace {
+static Registrar<SqrtImplBackward_cpu> registrarSqrtImplBackward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::SqrtImpl_cpu_backward_kernel<float, float>);
+static Registrar<SqrtImplBackward_cpu> registrarSqrtImplBackward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::SqrtImpl_cpu_backward_kernel<int, int>);
+static Registrar<SqrtImplBackward_cpu> registrarSqrtImplBackward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::SqrtImpl_cpu_backward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SQRTIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp
index a180fc2cc206ef27b52d506a981f9f50f7bf8a3e..886b978c2345ce555d229d684ba83f952be9e00e 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp
@@ -12,14 +12,16 @@
 #ifndef AIDGE_CPU_OPERATOR_SQRTIMPL_FORWARD_KERNEL_H_
 #define AIDGE_CPU_OPERATOR_SQRTIMPL_FORWARD_KERNEL_H_
 
+#include <cmath>    // std::sqrt
+#include <cstddef>  // std::size_t
+
 #include "aidge/utils/Registrar.hpp"
-#include <cmath>
 
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 
 namespace Aidge {
 template <class I, class O>
-void SqrtImpl_cpu_forward_kernel(std::size_t inputLenght,
+void SqrtImpl_cpu_forward_kernel(const std::size_t inputLenght,
                                      const void* input_,
                                      void* output_) {
 
@@ -27,7 +29,7 @@ void SqrtImpl_cpu_forward_kernel(std::size_t inputLenght,
     O* output = static_cast<O*>(output_);
 
     for (std::size_t i = 0; i < inputLenght; ++i) {
-        output[i] = std::sqrt(input[i]);
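+        // the float cast keeps the computation in single precision for every input type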
+        output[i] = static_cast<O>(std::sqrt(static_cast<float>(input[i])));
     }
 }
 
diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp
index a9006a04bc3690429532c1a6b9cc76f9ef32880e..15c028ae6289f39e0b6e6fd74e51e138b1f2675c 100644
--- a/include/aidge/backend/cpu/operator/SubImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl.hpp
@@ -33,7 +33,7 @@ class SubImplBackward_cpu
 
 class SubImpl_cpu : public OperatorImpl {
 public:
-    SubImpl_cpu(const Sub_Op& op) : OperatorImpl(op) {}
+    SubImpl_cpu(const Sub_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<SubImpl_cpu> create(const Sub_Op& op) {
         return std::make_unique<SubImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/TanhImpl.hpp b/include/aidge/backend/cpu/operator/TanhImpl.hpp
index b477d0bd0ee5434d942dfa1057968fa904300dde..9e44f7bcd2b2392c634421478a096258b3e39795 100644
--- a/include/aidge/backend/cpu/operator/TanhImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TanhImpl.hpp
@@ -33,7 +33,7 @@ class TanhImplBackward_cpu
 
 class TanhImpl_cpu : public OperatorImpl {
 public:
-    TanhImpl_cpu(const Tanh_Op& op) : OperatorImpl(op) {}
+    TanhImpl_cpu(const Tanh_Op& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TanhImpl_cpu> create(const Tanh_Op& op) {
         return std::make_unique<TanhImpl_cpu>(op);
diff --git a/include/aidge/backend/cpu/operator/TransposeImpl.hpp b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
index a1b9d274d2c14064ed9305b5d6c969dfa544b26b..8bdcc612ea434e266a97724d45aaeefc8e033bf0 100644
--- a/include/aidge/backend/cpu/operator/TransposeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
@@ -57,7 +57,7 @@ class TransposeImpl6DBackward_cpu
 
 class TransposeImpl2D_cpu : public OperatorImpl {
 public:
-    TransposeImpl2D_cpu(const Transpose_Op<2>& op) : OperatorImpl(op) {}
+    TransposeImpl2D_cpu(const Transpose_Op<2>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl2D_cpu> create(const Transpose_Op<2>& op) {
         return std::make_unique<TransposeImpl2D_cpu>(op);
@@ -67,7 +67,7 @@ public:
 };
 class TransposeImpl3D_cpu : public OperatorImpl {
 public:
-    TransposeImpl3D_cpu(const Transpose_Op<3>& op) : OperatorImpl(op) {}
+    TransposeImpl3D_cpu(const Transpose_Op<3>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl3D_cpu> create(const Transpose_Op<3>& op) {
         return std::make_unique<TransposeImpl3D_cpu>(op);
@@ -77,7 +77,7 @@ public:
 };
 class TransposeImpl4D_cpu : public OperatorImpl {
 public:
-    TransposeImpl4D_cpu(const Transpose_Op<4>& op) : OperatorImpl(op) {}
+    TransposeImpl4D_cpu(const Transpose_Op<4>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl4D_cpu> create(const Transpose_Op<4>& op) {
         return std::make_unique<TransposeImpl4D_cpu>(op);
@@ -87,7 +87,7 @@ public:
 };
 class TransposeImpl5D_cpu : public OperatorImpl {
 public:
-    TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op) {}
+    TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl5D_cpu> create(const Transpose_Op<5>& op) {
         return std::make_unique<TransposeImpl5D_cpu>(op);
@@ -97,7 +97,7 @@ public:
 };
 class TransposeImpl6D_cpu : public OperatorImpl {
 public:
-    TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op) {}
+    TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op, "cpu") {}
 
     static std::unique_ptr<TransposeImpl6D_cpu> create(const Transpose_Op<6>& op) {
         return std::make_unique<TransposeImpl6D_cpu>(op);
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 98de9188dad5539275bba9ae7961153099fb1b9f..d6d75a608e4da7d8b9ed8a28912ff2eb1751e042 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -9,17 +9,18 @@
  *
  ********************************************************************************/
 
+#include "aidge/backend/cpu/operator/AddImpl.hpp"
+
 #include <cassert>
 #include <numeric> // std::accumulate
 #include <vector>
 
-#include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
-
-#include "aidge/backend/cpu/operator/AddImpl.hpp"
-#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::Elts_t  Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -27,15 +28,18 @@ Aidge::Elts_t  Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t
 }
 
 void  Aidge::AddImpl_cpu::forward() {
-    assert(mOp.getRawInput(0) && "missing input in Add operator");
-    DataType datatypeFirstInput = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType();
-    for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
-        assert(mOp.getRawInput(i) && "missing input in Add operator");
-        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dataType() == datatypeFirstInput);
+    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
+    assert(opTensor.getInput(0) && "missing input in Add operator");
+    AIDGE_ASSERT(opTensor.getInput(0)->hasImpl(), "cannot run Add forward because the 0-th input has no implementation.");
+    DataType datatypeFirstInput = opTensor.getInput(0)->dataType();
+    for (IOIndex_t i = 1; i < opTensor.nbInputs(); ++i) {
+        assert(opTensor.getInput(i) && "missing input in Add operator");
+        AIDGE_ASSERT(opTensor.getInput(i)->hasImpl(), "cannot run Add forward because the {}-th input has no implementation.", i);
+        assert(opTensor.getInput(i)->dataType() == datatypeFirstInput);
     }
 
     // Find the correct kernel type
-    const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType();
+    const auto outputDataType = opTensor.getOutput(0)->dataType();
     const Registrar<AddImplForward_cpu>::registrar_key registrarKey = {
         datatypeFirstInput,
         outputDataType};
@@ -55,26 +59,26 @@ void  Aidge::AddImpl_cpu::forward() {
     // TODO: right now, if needed, memory will be allocated/deallocated at each
     // call to forward(). We might put the following shared_ptr as members of
     // this class to avoid that.
-    std::size_t nbDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->nbDims();
+    const std::size_t nbDims = opTensor.getOutput(0)->nbDims();
     std::vector<std::vector<std::size_t>> inputsDims;
     std::vector<const void*> opInputs;
-    std::vector<std::shared_ptr<Tensor>> inputsFallback(mOp.nbInputs());
-    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
+    std::vector<std::shared_ptr<Tensor>> inputsFallback(opTensor.nbInputs());
+    for (IOIndex_t i = 0; i < opTensor.nbInputs(); ++i) {
         std::vector<std::size_t> inputDims(nbDims, 1);
-        auto dims = std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dims();
+        auto dims = opTensor.getInput(i)->dims();
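+        // Right-align the input dims on the output rank, padding leading axes
+        // with 1 (e.g. {3, 4} with nbDims == 4 becomes {1, 1, 3, 4}), as per
+        // the usual broadcasting convention.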
 		for(std::size_t j=dims.size()-1; j+1>0; --j)
 		{
 			std::size_t idx = nbDims - (dims.size()-j);
 			inputDims[idx] = dims[j];
 		}
         inputsDims.push_back(inputDims);
-        const auto& input = std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->refCastFrom(inputsFallback[i], *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+        const auto& input = opTensor.getInput(i)->refCastFrom(inputsFallback[i], *opTensor.getOutput(0));
         opInputs.push_back(input.getImpl()->rawPtr());
     }
 
     kernelFunc(opInputs,
                inputsDims,
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
-               getCPUPtr(mOp.getRawOutput(0)));
+               opTensor.getOutput(0)->size(),
+               opTensor.getOutput(0)->dims(),
+               getCPUPtr(opTensor.getRawOutput(0)));
 }
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 3beb2bcf72ed9e318733dce9e69d41c61bf11e5b..7457a1a0b75af1f922c5a65ac88aabc813d00069 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -28,17 +28,19 @@ Aidge::Elts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx
 }
 
 void Aidge::ConvImpl2D_cpu::forward() {
+    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
+
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getRawInput(0) && "missing input #0");
     assert(mOp.getRawInput(1) && "missing input #1");
     assert(mOp.getRawInput(2) && "missing input #2");
 
     // Find the correct kernel type
-    const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType();
+    const auto outputDataType = opTensor.getOutput(0)->dataType();
     const Registrar<ConvImpl2DForward_cpu>::registrar_key registrarKey = {
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(),
+        opTensor.getInput(0)->dataType(),
+        opTensor.getInput(1)->dataType(),
+        opTensor.getInput(2)->dataType(),
         outputDataType};
 
     Registrar<ConvImpl2DForward_cpu>::registrar_type kernelFunc;
@@ -57,12 +59,12 @@ void Aidge::ConvImpl2D_cpu::forward() {
     // call to forward(). We might put the following shared_ptr as members of
     // this class to avoid that.
     std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
-    const auto& input0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(input0Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
-    const auto& input1 = std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->refCastFrom(input1Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
-    const auto& input2 = std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->refCastFrom(input2Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+    const auto& input0 = opTensor.getInput(0)->refCastFrom(input0Fallback, *opTensor.getOutput(0));
+    const auto& input1 = opTensor.getInput(1)->refCastFrom(input1Fallback, *opTensor.getOutput(0));
+    const auto& input2 = opTensor.getInput(2)->refCastFrom(input2Fallback, *opTensor.getOutput(0));
 
     // Call kernel
-    kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
+    kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), opTensor.getInput(0)->template dims<4>(),
         input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(),
         getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index bfb2ae643a02d67ea1a289d0383b816b5a6ad110..098b20776888c6d72110e4bc4c0c3e191febd41c 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -57,17 +57,18 @@ void Aidge::DivImpl_cpu::forward() {
     // 3. Compute the highest number of contiguous data -> 7
     // 4. Compute stride and offset step for the broadcast mechanism
     // 5. Call a simple kernel
+    const auto& opTensor = static_cast<const Div_Op&>(mOp);
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<DivImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        opTensor.getInput(0)->dataType(),
+        opTensor.getInput(1)->dataType(),
+        opTensor.getOutput(0)->dataType()});
 
     // Compute compatible input dimensions
-    std::vector<std::size_t>        dims0   = static_cast<const Div_Op&>(mOp).getInput(0)->dims();
-    std::vector<std::size_t>        dims1   = static_cast<const Div_Op&>(mOp).getInput(1)->dims();
-    const std::vector<std::size_t>& outDims = static_cast<const Div_Op&>(mOp).getOutput(0)->dims();
+    std::vector<std::size_t>        dims0   = opTensor.getInput(0)->dims();
+    std::vector<std::size_t>        dims1   = opTensor.getInput(1)->dims();
+    const std::vector<std::size_t>& outDims = opTensor.getOutput(0)->dims();
 
     // if (dims0 == dims1) {
     //     const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin(), dims0.cend(), std::size_t(1), std::multiplies<std::size_t>());
@@ -108,24 +109,24 @@ void Aidge::DivImpl_cpu::forward() {
     const std::size_t output_contiguous_size = std::accumulate(outDims.cbegin()+contiguousIdx, outDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
 
     // initialize strides to iterate through data because of broadcasting
-    std::size_t *stride_post0;
-    std::size_t *stride_post1;
+    std::int32_t *stride_post0;
+    std::int32_t *stride_post1;
     std::int32_t *stride_step0;
     std::int32_t *stride_step1;
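+    // stride_post holds the row-major strides of each input over the
+    // non-contiguous axes; stride_step is the pointer adjustment used when an
+    // axis wraps during iteration: +1 on a regular axis, 1 - stride_post on a
+    // broadcast axis (dim == 1) so the input pointer rewinds onto the same data.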
     if (contiguousIdx > 0) {
-        stride_post0 = new std::size_t[contiguousIdx];
+        stride_post0 = new std::int32_t[contiguousIdx];
         stride_post0[contiguousIdx - 1] = 1;
-        stride_post1 = new std::size_t[contiguousIdx];
+        stride_post1 = new std::int32_t[contiguousIdx];
         stride_post1[contiguousIdx - 1] = 1;
         for (std::size_t i = contiguousIdx - 2; i != static_cast<std::size_t>(-1); --i) {
-            stride_post0[i] = stride_post0[i+1]*dims0[i+1];
-            stride_post1[i] = stride_post1[i+1]*dims1[i+1];
+            stride_post0[i] = stride_post0[i+1]*static_cast<std::int32_t>(dims0[i+1]);
+            stride_post1[i] = stride_post1[i+1]*static_cast<std::int32_t>(dims1[i+1]);
         }
         stride_step0 = new std::int32_t[contiguousIdx];
         stride_step1 = new std::int32_t[contiguousIdx];
         for (std::size_t i = 0; i != contiguousIdx; ++i) {
-            stride_step0[i] = (dims0[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post0[i]) : 1;
-            stride_step1[i] = (dims1[i] == 1) ? 1 - static_cast<std::int32_t>(stride_post1[i]) : 1;
+            stride_step0[i] = (dims0[i] == 1) ? 1 - stride_post0[i] : 1;
+            stride_step1[i] = (dims1[i] == 1) ? 1 - stride_post1[i] : 1;
         }
     }
 
diff --git a/src/operator/ErfImpl.cpp b/src/operator/ErfImpl.cpp
index 1e6d2766f49a0a0b65c1cdb974f42d2865ae59f5..ace098468c05b80c4116e6f85d00b5fabaf754cd 100644
--- a/src/operator/ErfImpl.cpp
+++ b/src/operator/ErfImpl.cpp
@@ -9,32 +9,34 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include "aidge/backend/cpu/operator/ErfImpl.hpp"
+
+#include <memory>
 #include <vector>
 
+#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Erf.hpp"
 #include "aidge/utils/Types.h"
 
-#include "aidge/backend/cpu/operator/ErfImpl.hpp"
-#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp"
-
 Aidge::Elts_t Aidge::ErfImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
     return Elts_t::DataElts(0);
 }
 
 void Aidge::ErfImpl_cpu::forward() {
+    const Erf_Op& op = static_cast<const Erf_Op&>(mOp);
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ErfImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+                            op.getInput(0)->dataType(),
+                            op.getOutput(0)->dataType()
+                        });
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+    kernelFunc(
+        op.getInput(0)->size(),
+        op.getInput(0)->getImpl()->rawPtr(),
+        op.getOutput(0)->getImpl()->rawPtr()
+    );
 }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 995245907c8c87b0367c7edfa4493bd6b7faf660..eecff38afd4d4487d51a070d6c0f4c2507a2b478 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -9,31 +9,34 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
-#include <vector>
+#include "aidge/backend/cpu/operator/FCImpl.hpp"
+
+#include <cstddef>  // std::size_t
+#include <functional>
+#include <memory>
+#include <tuple>
 
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp"
+#include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp"
 #include "aidge/operator/FC.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
-#include "aidge/backend/cpu/operator/FCImpl.hpp"
-#include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp"
 
 void Aidge::FCImpl_cpu::forward()
 {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)) && "missing input #1");
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(2)) && "missing input #2");
+    const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
+    AIDGE_ASSERT(op_.getInput(0), "missing input #0");
+    AIDGE_ASSERT(op_.getInput(1), "missing input #1");
+    AIDGE_ASSERT(op_.getInput(2), "missing input #2");
 
     // Find the correct kernel type
-    const auto outputDataType = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType();
+    const auto outputDataType = op_.getOutput(0)->dataType();
     const Registrar<FCImplForward_cpu>::registrar_key registrarKey = {
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->dataType(),
+        op_.getInput(0)->dataType(),
+        op_.getInput(1)->dataType(),
+        op_.getInput(2)->dataType(),
         outputDataType};
 
     Registrar<FCImplForward_cpu>::registrar_type kernelFunc;
@@ -52,9 +55,9 @@ void Aidge::FCImpl_cpu::forward()
     // call to forward(). We might put the following shared_ptr as members of
     // this class to avoid that.
     std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
-    const auto& input0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(input0Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
-    const auto& input1 = std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->refCastFrom(input1Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
-    const auto& input2 = std::static_pointer_cast<Tensor>(mOp.getRawInput(2))->refCastFrom(input2Fallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+    const auto& input0 = op_.getInput(0)->refCastFrom(input0Fallback, *(op_.getOutput(0)));
+    const auto& input1 = op_.getInput(1)->refCastFrom(input1Fallback, *(op_.getOutput(0)));
+    const auto& input2 = op_.getInput(2)->refCastFrom(input2Fallback, *(op_.getOutput(0)));
 
     // Call kernel
     const auto batchSize = (input0.dims().size() > 1) ? input0.dims()[0] : 1;
@@ -64,3 +67,49 @@ void Aidge::FCImpl_cpu::forward()
         input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+void Aidge::FCImpl_cpu::backward()
+{
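+    // From the gradient of output #0, compute the gradients of the data
+    // input (#0), the weights (#1) and the bias (#2) via the kernel selected
+    // below.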
+    const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
+    const auto& fc_grad = op_.getOutput(0)->grad();
+    assert(fc_grad && "missing output #0 gradient");
+
+    // Find the correct kernel type
+    const Registrar<FCImplBackward_cpu>::registrar_key registrarKey = {
+        fc_grad->dataType(),
+        op_.getInput(0)->grad()->dataType(),
+        op_.getInput(1)->grad()->dataType(),
+        op_.getInput(2)->grad()->dataType()};
+
+    Registrar<FCImplBackward_cpu>::registrar_type kernelFunc;
+    if (Registrar<FCImplBackward_cpu>::exists(registrarKey)) {
+        // One exists with the right inputs/output types
+        kernelFunc = Registrar<FCImplBackward_cpu>::create(registrarKey);
+    }
+    else {
+        // Otherwise, fallback to the kernel with all types matching output type
+        kernelFunc = Registrar<FCImplBackward_cpu>::create({
+            fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType()});
+    }
+
+    // Convert input data (no overhead if not needed!)
+    // TODO: right now, if needed, memory will be allocated/deallocated at each
+    // call to forward(). We might put the following shared_ptr as members of
+    // this class to avoid that.
+    std::shared_ptr<Tensor> input0gradFallback, input1gradFallback, input2gradFallback;
+    const auto& input0grad = op_.getInput(0)->grad()->refCastFrom(input0gradFallback, *(op_.getOutput(0)));
+    const auto& input1grad = op_.getInput(1)->grad()->refCastFrom(input1gradFallback, *(op_.getOutput(0)));
+    const auto& input2grad = op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0)));
+
+    // Call kernel
+    const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
+    kernelFunc(op_.getStaticAttributes(),
+        batchSize,
+        input0grad.size() / batchSize,
+        getCPUPtr(fc_grad),
+        getCPUPtr(op_.getInput(0)),
+        getCPUPtr(op_.getInput(1)),
+        input0grad.getImpl()->rawPtr(),
+        input1grad.getImpl()->rawPtr(),
+        input2grad.getImpl()->rawPtr());
+}
diff --git a/src/operator/GatherImpl.cpp b/src/operator/GatherImpl.cpp
index 523cc0365884cb0496a46eb550aa90fa6f4c421d..5384f64536955b7cb2ed85af81e52697e9b84a2a 100644
--- a/src/operator/GatherImpl.cpp
+++ b/src/operator/GatherImpl.cpp
@@ -9,27 +9,29 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include "aidge/backend/cpu/operator/GatherImpl.hpp"
+
+#include <memory>
 #include <vector>
 
+#include "aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Gather.hpp"
 #include "aidge/utils/Types.h"
 
-#include "aidge/backend/cpu/operator/GatherImpl.hpp"
-#include "aidge/backend/cpu/operator/GatherImpl_forward_kernels.hpp"
-
 void Aidge::GatherImpl_cpu::forward() {
+    const Gather_Op& op = static_cast<const Gather_Op&>(mOp);
 
     auto kernelFunc = Registrar<GatherImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+                            op.getInput(0)->dataType(),
+                            op.getOutput(0)->dataType()
+                        });
 
     // Call kernel
     kernelFunc(dynamic_cast<const Gather_Op&>(mOp).getStaticAttributes(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+            op.getInput(0)->dims(),
+            op.getInput(0)->getImpl()->rawPtr(),
+            op.getOutput(0)->getImpl()->rawPtr()
+        );
 }
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index 7d41163e6c0dc3e1bc7a4ca3075520243aac6958..340af3eeaf370988f9b12d8535812c938e47078a 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -10,17 +10,17 @@
  ********************************************************************************/
 
 #include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp"
 
 Aidge::Elts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -28,16 +28,38 @@ Aidge::Elts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIn
 }
 
 void Aidge::LeakyReLUImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp);
+    std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
+    AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<LeakyReLUImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        in0->dataType(),
+        out0->dataType()});
 
     // Call kernel
     kernelFunc(dynamic_cast<const LeakyReLU_Op&>(mOp).getStaticAttributes(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+void Aidge::LeakyReLUImpl_cpu::backward() {
+    // reversing in and out Data for backprop
+    const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp);
+    std::shared_ptr<Tensor> in0  = op_.getOutput(0)->grad();
+    std::shared_ptr<Tensor> out0 = op_.getInput(0)->grad();
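+    // Expected math: dL/dx = dL/dy where x > 0, negativeSlope * dL/dy
+    // otherwise, applied elementwise by the backward kernel.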
+    AIDGE_ASSERT(in0, "missing gradient for output #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<LeakyReLUImplBackward_cpu>::create({
+        in0->dataType(),
+        out0->dataType()});
+
+    // Call kernel
+    kernelFunc(op_.getStaticAttributes(),
+        in0->size(),
+        getCPUPtr(in0),
+        getCPUPtr(out0));
+}
\ No newline at end of file
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
index 782ca35706b5fd28e376f97651c847492b9bf755..811d13804cffdd2477fc830f1779b0fb6271eb0b 100644
--- a/src/operator/PowImpl.cpp
+++ b/src/operator/PowImpl.cpp
@@ -48,3 +48,25 @@ void Aidge::PowImpl_cpu::forward() {
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+void Aidge::PowImpl_cpu::backward() {
+    // Find the correct kernel type
+    const Pow_Op& op_ = dynamic_cast<const Pow_Op&>(mOp);
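+    // Expected math for z = x^y: dz/dx = y * x^(y-1) and dz/dy = x^y * ln(x),
+    // applied elementwise with broadcasting on the gradient dims below.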
+    auto kernelFunc = Registrar<PowImplForward_cpu>::create({
+        op_.getOutput(0)->grad()->dataType(),
+        op_.getInput(0)->grad()->dataType(),
+        op_.getInput(1)->grad()->dataType()});
+
+    const std::vector<std::size_t> input0gradDims = getBroadcastedDims(op_.getInput(0)->grad()->dims(),
+                                                                       op_.getOutput(0)->grad()->dims());
+    const std::vector<std::size_t> input1gradDims = getBroadcastedDims(op_.getInput(1)->grad()->dims(),
+                                                                       op_.getOutput(0)->grad()->dims());
+
+    // Call kernel
+    kernelFunc(op_.getOutput(0)->grad()->dims(),
+               input0gradDims,
+               input1gradDims,
+               getCPUPtr(mOp.getRawOutput(0)),
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawInput(1)));
+}
\ No newline at end of file
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 81d1639daf02e8fac0bff3bc30de482b4f0a76d8..4bba09b6fbeea1552bf5b7cc7e491291345fca45 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -9,18 +9,18 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include <memory>
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp"
 
 Aidge::Elts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -28,15 +28,33 @@ Aidge::Elts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t
 }
 
 void Aidge::ReLUImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
+    AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ReLUImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        in0->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+    kernelFunc(in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+void Aidge::ReLUImpl_cpu::backward() {
+    // reversing in and out Tensors
+    const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
+    std::shared_ptr<Tensor> in0  = op_.getOutput(0)->grad();
+    std::shared_ptr<Tensor> out0 = op_.getInput(0)->grad();
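+    // Expected math: dL/dx = dL/dy where x > 0 and 0 elsewhere; the backward
+    // kernel is expected to apply this mask elementwise.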
+    AIDGE_ASSERT(out0, "current {} operator output #0 has no gradient Tensor.", op_.type());
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<ReLUImplBackward_cpu>::create({
+        in0->dataType(),
+        out0->dataType()
+    });
+
+    // Call kernel
+    kernelFunc(in0->size(), getCPUPtr(in0), getCPUPtr(out0));
+}
diff --git a/src/operator/ReduceMeanImpl.cpp b/src/operator/ReduceMeanImpl.cpp
index 324daa9ea2cf49ad15bde0d6c41c6bbcd7eb0c45..a9f17a28a2a47ec7bc50820d587e8d0f359d2bb3 100644
--- a/src/operator/ReduceMeanImpl.cpp
+++ b/src/operator/ReduceMeanImpl.cpp
@@ -9,59 +9,70 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
+
+#include <memory>
 #include <vector>
 
 #include "aidge/utils/Types.h"
 #include "aidge/operator/ReduceMean.hpp"
-
-#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
 #include "aidge/backend/cpu/operator/ReduceMeanImpl_forward_kernels.hpp"
 
-void Aidge::ReduceMeanImpl1D_cpu::forward() {
-
+void Aidge::ReduceMeanImpl_cpu::forward() {
+    const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp);
     // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<ReduceMeanImpl1DForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+    auto kernelFunc = Registrar<ReduceMeanImplForward_cpu>::create({
+        op_.getInput(0)->dataType(),
+        op_.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(dynamic_cast<const ReduceMean_Op<1>&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+    kernelFunc(op_.getStaticAttributes(),
+               op_.getInput(0)->dims(),
+               op_.getInput(0)->getImpl()->rawPtr(),
+               op_.getOutput(0)->getImpl()->rawPtr());
 }
 
-void Aidge::ReduceMeanImpl2D_cpu::forward() {
+// void Aidge::ReduceMeanImpl1D_cpu::forward() {
 
-    // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<ReduceMeanImpl2DForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+//     // Find the correct kernel type
+//     auto kernelFunc =
+//             Registrar<ReduceMeanImpl1DForward_cpu>::create({
+//         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+//         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
-    // Call kernel
-    kernelFunc(dynamic_cast<const ReduceMean_Op<2>&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
-}
+//     // Call kernel
+//     kernelFunc(dynamic_cast<const ReduceMean_Op<1>&>(mOp).getStaticAttributes(),
+//                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+//                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+//                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+// }
 
-void Aidge::ReduceMeanImpl3D_cpu::forward() {
+// void Aidge::ReduceMeanImpl2D_cpu::forward() {
 
-    // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<ReduceMeanImpl3DForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+//     // Find the correct kernel type
+//     auto kernelFunc =
+//             Registrar<ReduceMeanImpl2DForward_cpu>::create({
+//         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+//         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
 
-    // Call kernel
-    kernelFunc(dynamic_cast<const ReduceMean_Op<3>&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
-}
\ No newline at end of file
+//     // Call kernel
+//     kernelFunc(dynamic_cast<const ReduceMean_Op<2>&>(mOp).getStaticAttributes(),
+//                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+//                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+//                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+// }
+
+// void Aidge::ReduceMeanImpl3D_cpu::forward() {
+
+//     // Find the correct kernel type
+//     auto kernelFunc =
+//             Registrar<ReduceMeanImpl3DForward_cpu>::create({
+//         std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+//         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+//     // Call kernel
+//     kernelFunc(dynamic_cast<const ReduceMean_Op<3>&>(mOp).getStaticAttributes(),
+//                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+//                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+//                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+// }
\ No newline at end of file
diff --git a/src/operator/ReshapeImpl.cpp b/src/operator/ReshapeImpl.cpp
index 8cd71c4ed65a808b573736f13c4f64f61b2e4795..69c1c3135ce9f32d536bfd2c41b90eb55f7d8986 100644
--- a/src/operator/ReshapeImpl.cpp
+++ b/src/operator/ReshapeImpl.cpp
@@ -9,13 +9,13 @@
  *
  ********************************************************************************/
 
-#include <cassert>
+#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
 
+#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Reshape.hpp"
 #include "aidge/utils/Types.h"
-
-#include "aidge/backend/cpu/operator/ReshapeImpl.hpp"
-#include "aidge/backend/cpu/operator/ReshapeImpl_forward_kernels.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::Elts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -23,17 +23,17 @@ Aidge::Elts_t Aidge::ReshapeImpl_cpu::getNbRequiredProtected(const Aidge::IOInde
 }
 
 void Aidge::ReshapeImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size() == 
-           std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size()
-            && "input must have the same overall size as shape");
+    const Reshape_Op& op_ = static_cast<const Reshape_Op&>(mOp);
+    AIDGE_ASSERT(op_.getInput(0)->size() == op_.getOutput(0)->size(),
+                    "input must have the same overall size as shape");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ReshapeImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        op_.getInput(0)->dataType(),
+        op_.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
-               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+    kernelFunc(op_.getInput(0)->size(),
+               op_.getInput(0)->getImpl()->rawPtr(),
+               op_.getOutput(0)->getImpl()->rawPtr());
 }
diff --git a/src/operator/SqrtImpl.cpp b/src/operator/SqrtImpl.cpp
index 8fcb2e9d05b859b5f572f614a72ff42b1f20d4dd..edb8858fc4ac07fa5725d24688b22d64134afb0e 100644
--- a/src/operator/SqrtImpl.cpp
+++ b/src/operator/SqrtImpl.cpp
@@ -9,18 +9,18 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <chrono>  // std::chrono::milliseconds
-#include <numeric> // std::accumulate
-#include <thread>  // std::this_thread::sleep_for
+#include <memory>
 #include <vector>
 
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Sqrt.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp"
 
 Aidge::Elts_t Aidge::SqrtImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -28,15 +28,35 @@ Aidge::Elts_t Aidge::SqrtImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t
 }
 
 void Aidge::SqrtImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
+    std::shared_ptr<Tensor> out0 = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0));
+    AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<SqrtImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        in0->dataType(),
+        out0->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+    kernelFunc(in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
+}
+
+void Aidge::SqrtImpl_cpu::backward() {
+    // reversing in and out Data for backprop
+    const Sqrt_Op& op_ = dynamic_cast<const Sqrt_Op&>(mOp);
+    std::shared_ptr<Tensor> out0grad  = op_.getOutput(0)->grad();
+    std::shared_ptr<Tensor> in0grad = op_.getInput(0)->grad();
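+    // Expected math for y = sqrt(x): dL/dx = dL/dy / (2 * y), computed
+    // elementwise by the backward kernel.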
+    AIDGE_ASSERT(out0grad, "missing gradient for output #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<SqrtImplBackward_cpu>::create({
+        out0grad->dataType(),
+        in0grad->dataType()});
+
+    // Call kernel
+    kernelFunc(out0grad->size(),
+        getCPUPtr(out0grad),
+        getCPUPtr(in0grad));
 }
\ No newline at end of file
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..31fbed4c090f5e4848df12f2bc2ccd36e3aedf9d
--- /dev/null
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -0,0 +1,192 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::uint16_t
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/backend/cpu/operator/AddImpl.hpp"
+
+namespace Aidge {
+
+TEST_CASE("Test addition of Tensors","[TensorImpl][Add]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<int> boolDist(0,1);
+
+    // Create Add Operator
+    std::shared_ptr<Node> myAdd = Add(2);
+    auto op = std::static_pointer_cast<OperatorTensor>(myAdd->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // Create 2 input Tensors
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0,T0);
+    T0->setDataType(DataType::Float32);
+    T0->setBackend("cpu");
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op->associateInput(1,T1);
+    T1->setDataType(DataType::Float32);
+    T1->setBackend("cpu");
+
+    // Create results Tensor
+    Tensor Tres{};
+    Tres.setDataType(DataType::Float32);
+    Tres.setBackend("cpu");
+
+    // To measure execution time of 'Add_Op::forward()' member function call
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+
+    std::size_t number_of_operation = 0;
+
+    for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+        // generate 2 random Tensors
+        // handle dimensions, replace some dimensions with '1' to get broadcasting
+        constexpr std::size_t nbDims = 4;
+        std::vector<std::size_t> dims;
+        for (std::size_t i = 0; i < nbDims; ++i) {
+            dims.push_back(dimSizeDist(gen));
+        }
+        std::vector<std::size_t> dims0 = dims;
+        std::vector<std::size_t> dims1 = dims;
+        std::vector<std::size_t> dimsOut = dims;
+        for (std::size_t i = 0; i < nbDims; ++i) {
+            if (boolDist(gen)) {
+                dims0[i] = 1;
+            }
+            if (boolDist(gen)) {
+                dims1[i] = 1;
+            }
+            dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
+        }
+
+        // create arrays and fill them with random values
+        float* array0 = new float[dims0[0]*dims0[1]*dims0[2]*dims0[3]];
+        float* array1 = new float[dims1[0]*dims1[1]*dims1[2]*dims1[3]];
+        float* result = new float[dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]];
+
+        for (std::size_t i = 0; i < dims0[0]*dims0[1]*dims0[2]*dims0[3]; ++i) {
+            array0[i] = valueDist(gen);
+        }
+        for (std::size_t i = 0; i < dims1[0]*dims1[1]*dims1[2]*dims1[3]; ++i) {
+            array1[i] = valueDist(gen);
+        }
+
+        // compute true result
+        const std::size_t strides0[nbDims] = {dims0[1]*dims0[2]*dims0[3], dims0[2]*dims0[3], dims0[3], 1};
+        const std::size_t strides1[nbDims] = {dims1[1]*dims1[2]*dims1[3], dims1[2]*dims1[3], dims1[3], 1};
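+        // For each output coordinate, clamp the index to 0 on broadcast axes
+        // (dim == 1) so the single element is re-used, mirroring the
+        // broadcasting the Add kernel is expected to perform.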
+        for (std::size_t a = 0; a < dimsOut[0]; ++a) {
+            for (std::size_t b = 0; b < dimsOut[1]; ++b) {
+                const std::size_t idx0_0 = strides0[0] * ((dims0[0] > 1) ? a : 0)
+                                            + strides0[1] * ((dims0[1] > 1) ? b : 0);
+                const std::size_t idx1_0 = strides1[0] * ((dims1[0] > 1) ? a : 0)
+                                            + strides1[1] * ((dims1[1] > 1) ? b : 0);
+                for (std::size_t c = 0; c < dimsOut[2]; ++c) {
+                    const std::size_t idx_out = dimsOut[3] * (c + dimsOut[2] * (b + dimsOut[1] * a));
+                    for (std::size_t d = 0; d < dimsOut[3]; ++d) {
+                        std::size_t idx0 = idx0_0
+                                            + strides0[2] * ((dims0[2] > 1) ? c : 0)
+                                            + ((dims0[3] > 1) ? d : 0);
+                        std::size_t idx1 = idx1_0
+                                            + strides1[2] * ((dims1[2] > 1) ? c : 0)
+                                            + ((dims1[3] > 1) ? d : 0);
+                        result[idx_out + d] = array0[idx0] + array1[idx1];
+                        // std::cout << "(" << idx0 << ", " << idx1 << ") -> " << array0[idx0] << " - " << array1[idx1] << " -> " << idx_out + d << std::endl;
+                    }
+                }
+            }
+        }
+
+        // conversion to Aidge::Tensors
+        // input0
+        T0->resize(dims0);
+        T0->getImpl()->setRawPtr(array0, dims0[0]*dims0[1]*dims0[2]*dims0[3]);
+
+        // input1
+        T1->resize(dims1);
+        T1->getImpl()->setRawPtr(array1, dims1[0]*dims1[1]*dims1[2]*dims1[3]);
+
+        // results
+        Tres.resize(dimsOut);
+        Tres.getImpl()->setRawPtr(result, dimsOut[0]*dimsOut[1]*dimsOut[2]*dimsOut[3]);
+
+        Tensor T2 = *T0 + *T1;
+        REQUIRE(T2 == Tres);
+
+        // no implementation
+        Tensor T3(T1->dims());
+        REQUIRE_THROWS(*T0 + T3);
+
+        // // wrong backend
+        // static Registrar<Add_Op> registrarAddImpl_custom("custom", [](const Add_Op& op) { return std::make_unique<AddImpl_cpu>(op); } );
+        // static Registrar<Tensor> registrarTensorImpl_custom_Int32({"custom", DataType::Int32},
+        //             [] (DeviceIdx_t device, std::vector<DimSize_t> dims) {
+        //                 return std::make_shared<TensorImpl_cpu<int>>(device, dims);
+        //             }
+        //         );
+        // T1.setBackend("custom");
+        // REQUIRE_THROWS(T0 + T1);
+
+        // wrong datatype
+        Tensor T4(T1->dims());
+        T4.setDataType(DataType::Float64);
+        REQUIRE_THROWS(*T0 + T4);
+    }
+}
+
+TEST_CASE("Test substraction of Tensors","[TensorImpl][Sub]") {
+    Tensor T0 = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 1}, {3, 7}}, {{54, 0}, {7, 12}}}};
+    Tensor T2 = T0 - T1;
+    T2.print();
+    REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{-6,1},{0,-3}},{{-49,6},{0,-4}}}}));
+
+    Tensor T3(T1.dims());
+    REQUIRE_THROWS(T0 - T3);
+}
+
+TEST_CASE("Test multiplication of Tensors","[TensorImpl][Mul]") {
+    Tensor T0 = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+    Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 2}, {3, 7}}, {{5, 6}, {7, 8}}}};
+    Tensor T2 = T0 * T1;
+    T2.print();
+    REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{7,4},{9,28}},{{25,36},{49,64}}}}));
+
+    Tensor T3(T1.dims());
+    REQUIRE_THROWS(T0 * T3);
+}
+
+TEST_CASE("Test division of Tensors","[TensorImpl][Div]") {
+    Tensor T0 = Array3D<int, 2, 2, 2>{{{{7,4},{9,28}},{{25,36},{49,64}}}};
+    Tensor T1 = Array3D<int, 2, 2, 2>{{{{7, 2}, {3, 7}}, {{5, 6}, {7, 8}}}};
+    Tensor T2 = T0 / T1;
+    T2.print();
+    REQUIRE(T2 == Tensor(Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}));
+
+    Tensor T3(T1.dims());
+    REQUIRE_THROWS(T0 / T3);
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_ReduceMeanImpl.cpp b/unit_tests/operator/Test_ReduceMeanImpl.cpp
index 494b7a6ace17173ef7b956bc9dabf4d27e665e5a..d9bf68b78d1ece371cbfb5cda3c502f82eaf97de 100644
--- a/unit_tests/operator/Test_ReduceMeanImpl.cpp
+++ b/unit_tests/operator/Test_ReduceMeanImpl.cpp
@@ -17,6 +17,7 @@
 #include "aidge/operator/Conv.hpp"
 
 #include "aidge/backend/cpu.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
 using namespace Aidge;
 
@@ -138,35 +139,60 @@ TEST_CASE("[cpu/operator] ReduceMean(forward)", "[ReduceMean][CPU]") {
 
     }
     SECTION("all_axes") {
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
-            {
-                {
-                    { 5.0, 1.0 },
-                    { 20.0, 2.0 }
-                },
-                {
-                    { 30.0, 1.0 },
-                    { 40.0, 2.0 }
-                },
+        SECTION("1") {
+            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array3D<float,3,2,2> {
                 {
-                    { 55.0, 1.0 },
-                    { 60.0, 2.0 }
+                    {
+                        { 5.0, 1.0 },
+                        { 20.0, 2.0 }
+                    },
+                    {
+                        { 30.0, 1.0 },
+                        { 40.0, 2.0 }
+                    },
+                    {
+                        { 55.0, 1.0 },
+                        { 60.0, 2.0 }
+                    }
                 }
-            }
-        });
-        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
-            {18.25}
-        });
+            });
+            std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
+                {18.25}
+            });
 
-        std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0);
-        auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
-        op->associateInput(0,myInput);
-        op->setDataType(DataType::Float32);
-        op->setBackend("cpu");
-        op->computeOutputDims();
-        myReduceMean->forward();
-        op->getOutput(0)->print();
+            std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1, 2}, 0);
+            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+            op->associateInput(0,myInput);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->computeOutputDims();
+            myReduceMean->forward();
+            op->getOutput(0)->print();
 
-        REQUIRE(*(op->getOutput(0)) == *myOutput);
+            REQUIRE(*(op->getOutput(0)) == *myOutput);
+        }
+        SECTION("2") {
+            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<float,5,4> {
+               {{ 0.004232f, 0.105120f, 0.045124f, 0.009205f},
+                { 0.000766f, 0.272162f, 0.503560f, 0.044163f},
+                { 0.049755f, 0.000305f, 0.143634f, 0.013253f},
+                { 0.096258f, 0.311231f, 0.358143f, 0.000452f},
+                { 0.468617f, 0.015693f, 0.145316f, 0.000105f}}
+            });
+            std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array1D<float,1> {
+                {0.1293547f}
+            });
+
+            std::shared_ptr<Node> myReduceMean = ReduceMean({0, 1}, 0);
+            auto op = std::static_pointer_cast<OperatorTensor>(myReduceMean -> getOperator());
+            op->associateInput(0,myInput);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->computeOutputDims();
+            myReduceMean->forward();
+            op->getOutput(0)->print();
+            REQUIRE(approxEq<float>(*(op->getOutput(0)), *myOutput));
+        }
     }
 }
\ No newline at end of file
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index d2306ad0127f19c06ac2b84f8ab83673a56c35b2..0bbe59643df050759c209878135da67a0c94d6ce 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -21,6 +21,7 @@
 #include "aidge/scheduler/ParallelScheduler.hpp"
 
 #include "aidge/backend/cpu.hpp"
+#include "aidge/recipes/GraphViewHelper.hpp"
 
 using namespace Aidge;
 
@@ -346,7 +347,7 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
 
         std::vector<std::shared_ptr<Aidge::Tensor>> dataIn = {inputTensor};
         REQUIRE_NOTHROW(scheduler.forward(true, dataIn));
-        
+
         scheduler.saveSchedulingDiagram("schedulingSequential");
 
         std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
@@ -391,4 +392,45 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
         bool equal4 = (*other4 == expectedOutput4);
         REQUIRE(equal4);
     }
+}
+
+TEST_CASE("[cpu/scheduler] SequentialScheduler(backward)", "[scheduler][backward]") {
+
+    // create GraphView
+    std::shared_ptr<GraphView> gv = Sequential({ReLU("relu0"), Sqrt("sqrt0"), ReLU("relu1")});
+
+    std::shared_ptr<Tensor> inputTensor =
+            std::make_shared<Tensor>(Array4D<float, 2, 1, 5, 5>{{{{{0.0f,  1.0f,  2.0f,  3.0f,  4.0f},
+                                                                 {5.0f,  6.0f,  7.0f,  8.0f,  9.0f},
+                                                                {10.0f, 11.0f, 12.0f, 13.0f, 14.0f},
+                                                                {15.0f, 16.0f, 17.0f, 18.0f, 19.0f},
+                                                                {20.0f, 21.0f, 22.0f, 23.0f, 24.0f}}},
+                                                              {{{25.0f, 26.0f, 27.0f, 28.0f, 29.0f},
+                                                                {30.0f, 31.0f, 32.0f, 33.0f, 34.0f},
+                                                                {35.0f, 36.0f, 37.0f, 38.0f, 39.0f},
+                                                                {40.0f, 41.0f, 42.0f, 43.0f, 44.0f},
+                                                                {45.0f, 46.0f, 47.0f, 48.0f, 49.0f}}}}});
+    auto label = inputTensor;
+    // implementation already set to default
+    auto myProd = Producer(inputTensor, "prod");
+    myProd->addChild(gv);
+    gv->compile("cpu", DataType::Float32);
+    compile_gradient(gv);
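+    // compile_gradient is assumed here to allocate the gradient Tensors of
+    // the GraphView (see aidge/recipes/GraphViewHelper.hpp) so backward() can run.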
+    SequentialScheduler scheduler(gv);
+    scheduler.forward();
+    auto predictedOutput = gv->getOrderedOutputs()[0].first;
+
+    std::shared_ptr<Tensor> targetOutput =
+          std::make_shared<Tensor>(Array4D<float, 2, 1, 5, 5>{{{{{0.0f, 1.0f, 1.0f, 2.0f, 2.0f},
+                                                                 {2.0f, 2.0f, 3.0f, 3.0f, 3.0f},
+                                                                 {3.0f, 3.0f, 3.0f, 4.0f, 4.0f},
+                                                                 {4.0f, 4.0f, 4.0f, 4.0f, 4.0f},
+                                                                 {4.0f, 5.0f, 5.0f, 5.0f, 5.0f}}},
+                                                               {{{5.0f, 5.0f, 5.0f, 5.0f, 5.0f},
+                                                                 {5.0f, 6.0f, 6.0f, 6.0f, 6.0f},
+                                                                 {6.0f, 6.0f, 6.0f, 6.0f, 6.0f},
+                                                                 {6.0f, 6.0f, 6.0f, 7.0f, 7.0f},
+                                                                 {7.0f, 7.0f, 7.0f, 7.0f, 7.0f}}}}});
+
+    REQUIRE_NOTHROW(scheduler.backward({targetOutput}));
 }
\ No newline at end of file