diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index e39c35b42fdb6065aa72aee092cd1cd23b2b1011..ca04dff9164ecc8492d9263b32b60272dbbad395 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -25,7 +25,19 @@
 namespace Aidge {
 // Operator implementation entry point for the backend
 using AddImpl_cpu = OperatorImpl_cpu<Add_Op,
-    void(std::vector<std::size_t>, std::vector<std::size_t>, const std::vector<std::size_t>&, const void*, const void*, void*)>;
+    void(std::vector<std::size_t>, std::vector<std::size_t>, const std::vector<std::size_t>&, const void*, const void*, void*),
+    void(const std::size_t,
+         const std::size_t,
+         const std::size_t,
+         const std::vector<std::size_t>&,
+         const std::vector<std::size_t>&,
+         const std::vector<std::size_t>&,
+         const void*,
+         const void*,
+         const void*,
+         void*,
+         void*)
+>;
 
 // Implementation entry point registration to Operator
 REGISTRAR(Add_Op, "cpu", Aidge::AddImpl_cpu::create);
diff --git a/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
index e6d13fcf3699824a8410015d35ff766adf617c11..d6fff9b58d381895350998e11bae01684718ad3e 100644
--- a/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
@@ -147,25 +147,75 @@ void AddImpl_cpu_forward_kernel(std::vector<std::size_t> dims0,
     }
 }
 
+template <class I, class O>
+void AddImpl_cpu_backward_kernel(const std::size_t input0Length,
+                                 const std::size_t input1Length,
+                                 const std::size_t gradOutputLength,
+                                 const std::vector<std::size_t>& dims0,
+                                 const std::vector<std::size_t>& dims1,
+                                 const std::vector<std::size_t>& outputDims,
+                                 const void* input0_,
+                                 const void* input1_,
+                                 const void* grad_output_,
+                                 void* gradientInput0_,
+                                 void* gradientInput1_)
+{
+    // TODO: input0_/input1_ are never read in Add's backward; remove them from the kernel signature.
+    const I* input0 = static_cast<const I*>(input0_);
+    const I* input1 = static_cast<const I*>(input1_);
+    const O* gradOutput = static_cast<const O*>(grad_output_);
+    auto* gradInput0 = static_cast<I*>(gradientInput0_);
+    auto* gradInput1 = static_cast<I*>(gradientInput1_);
+
+    std::fill_n(gradInput0, input0Length, static_cast<I>(0));
+    std::fill_n(gradInput1, input1Length, static_cast<I>(0));
+
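+    // getBroadcastedDims is expected to expand dims0/dims1 to the rank of outputDims; size-1 axes mark broadcast dimensions.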
+    auto broadcastedDims0 = getBroadcastedDims(outputDims, dims0);
+    auto broadcastedDims1 = getBroadcastedDims(outputDims, dims1);
+
+    for (std::size_t i = 0; i < gradOutputLength; ++i) {
+        auto idxOutputGrad = getMultiDimIndices(outputDims, i);
+        std::vector<std::size_t> idxInput0(broadcastedDims0.size());
+        std::vector<std::size_t> idxInput1(broadcastedDims1.size());
+
+        for (std::size_t dimension = 0; dimension < broadcastedDims0.size(); ++dimension) {
+            idxInput0[dimension] = (broadcastedDims0[dimension] == 1) ? 0 : idxOutputGrad[dimension];
+        }
+
+        for (std::size_t dimension = 0; dimension < broadcastedDims1.size(); ++dimension) {
+            idxInput1[dimension] = (broadcastedDims1[dimension] == 1) ? 0 : idxOutputGrad[dimension];
+        }
+
+        auto idx0 = getFlattenedIndex(broadcastedDims0, idxInput0);
+        auto idx1 = getFlattenedIndex(broadcastedDims1, idxInput1);
+
+        // For addition: gradient of both inputs is just the output gradient
+        // (unlike multiplication where we need to multiply by the other input,
+        // or subtraction where we need to negate one of them)
+        gradInput0[idx0] += static_cast<I>(gradOutput[i]);
+        gradInput1[idx1] += static_cast<I>(gradOutput[i]);
+    }
+}
+
 // Kernels registration to implementation entry point
 REGISTRAR(AddImpl_cpu,
     {ImplSpec::IOSpec{DataType::Any}, ImplSpec::IOSpec{DataType::Float32}},
-    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<float, float>, nullptr});
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<float, float>, Aidge::AddImpl_cpu_backward_kernel<float, float>});
 REGISTRAR(AddImpl_cpu,
     {ImplSpec::IOSpec{DataType::Any}, ImplSpec::IOSpec{DataType::Float64}},
-    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<double, double>, nullptr});
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<double, double>, Aidge::AddImpl_cpu_backward_kernel<double, double>});
 REGISTRAR(AddImpl_cpu,
     {ImplSpec::IOSpec{DataType::Any}, ImplSpec::IOSpec{DataType::Int8}},
-    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::int8_t, std::int8_t>, nullptr});
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::int8_t, std::int8_t>, Aidge::AddImpl_cpu_backward_kernel<std::int8_t, std::int8_t>});
 REGISTRAR(AddImpl_cpu,
     {ImplSpec::IOSpec{DataType::Any}, ImplSpec::IOSpec{DataType::UInt8}},
-    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::uint8_t, std::uint8_t>, nullptr});
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::uint8_t, std::uint8_t>, Aidge::AddImpl_cpu_backward_kernel<std::uint8_t, std::uint8_t>});
 REGISTRAR(AddImpl_cpu,
     {ImplSpec::IOSpec{DataType::Any}, ImplSpec::IOSpec{DataType::Int32}},
-    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::int32_t, std::int32_t>, nullptr});
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::int32_t, std::int32_t>, Aidge::AddImpl_cpu_backward_kernel<std::int32_t, std::int32_t>});
 REGISTRAR(AddImpl_cpu,
     {ImplSpec::IOSpec{DataType::Any}, ImplSpec::IOSpec{DataType::Int64}},
-    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::int64_t, std::int64_t>, nullptr});
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::int64_t, std::int64_t>, Aidge::AddImpl_cpu_backward_kernel<std::int64_t, std::int64_t>});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_CPU_KERNELS_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_CPU_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/ConstantOfShapeImpl.hpp b/include/aidge/backend/cpu/operator/ConstantOfShapeImpl.hpp
index 83e7e030f526e0db3cff4741eabe39e287130562..b595ec9300c27740408993fc501923600967edff 100644
--- a/include/aidge/backend/cpu/operator/ConstantOfShapeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConstantOfShapeImpl.hpp
@@ -12,23 +12,21 @@
 #ifndef AIDGE_CPU_OPERATOR_CONSTANTOFSHAPEIMPL_H_
 #define AIDGE_CPU_OPERATOR_CONSTANTOFSHAPEIMPL_H_
 
-#include <cstddef>
 #include <memory>
-#include <vector>
 
 #include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/ConstantOfShape.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+class Tensor;
 // Operator implementation entry point for the backend
 using ConstantOfShapeImpl_cpu = OperatorImpl_cpu<ConstantOfShape_Op,
-    void(const std::vector<DimSize_t>, const Tensor&, void *)>;
+    void(const std::shared_ptr<Tensor>&, const Tensor&)>;
 
 // Implementation entry point registration to Operator
 REGISTRAR(ConstantOfShape_Op, "cpu", Aidge::ConstantOfShapeImpl_cpu::create);
 } // namespace Aidge
 
 #endif /* _AIDGE_CPU_OPERATOR_CONSTANTOFSHAPEIMPL_H_ */
-
diff --git a/include/aidge/backend/cpu/operator/ConstantOfShapeImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ConstantOfShapeImpl_kernels.hpp
index 18ab9c0a77c4545c955fc4fe1f1fc1cbcb763bf7..c42cc76a67dbd25564d3ebefa8580454ce34cf0d 100644
--- a/include/aidge/backend/cpu/operator/ConstantOfShapeImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConstantOfShapeImpl_kernels.hpp
@@ -30,20 +30,11 @@
 namespace Aidge {
 template <class O>
 void ConstantOfShapeimpl_cpu_forward_kernel(
-    const std::vector<DimSize_t> output_dims, const Tensor &value,
-    void *output_) {
+    const std::shared_ptr<Tensor>& output_, const Tensor &value) {
 
-  O *output = static_cast<O *>(output_);
-  O val;
-  std::copy(static_cast<O *>(value.getImpl()->hostPtr()),
-            static_cast<O *>(value.getImpl()->hostPtr()) +
-                static_cast<NbElts_t>(1),
-            &val);
-  const size_t output_size = std::accumulate(
-      output_dims.begin(), output_dims.end(), 1, std::multiplies<DimSize_t>());
-  for (size_t i = 0; i < output_size; ++i) {
-    output[i] = val;
-  }
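+  // Read the scalar fill value once, then broadcast it across the whole output tensor.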
+  O* output = static_cast<O*>(output_->getImpl()->hostPtr());
+  const O val = *static_cast<const O*>(value.getImpl()->hostPtr());
+  std::fill_n(output, output_->size(), val);
 }
 
 // Kernels registration to implementation entry point
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 101743eccb606c998a38f49dd9b89f5ec279bcae..b027fb876c8e597c85d13fea0f1fb6dd1207a1d9 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -55,5 +55,28 @@ void  Aidge::AddImpl_cpu::forward() {
 
 template <>
 void Aidge::AddImpl_cpu::backward() {
-    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Add_Op on backend cpu");
+    const Add_Op& op_ = dynamic_cast<const Add_Op&>(mOp);
+
+    auto in0 = op_.getInput(0);
+    auto in1 = op_.getInput(1);
+    auto in0grad = in0->grad();
+    auto in1grad = in1->grad();
+    auto out0grad = op_.getOutput(0)->grad();
+
+    // Find the correct kernel type
+    const auto impl = Registrar<AddImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+
+    // Call kernel
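+    // in0/in1 are forwarded only to satisfy the kernel signature; Add's backward does not read them (see the kernel's TODO).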
+    impl.backward(in0grad->size(),
+                  in1grad->size(),
+                  out0grad->size(),
+                  in0->dims(),
+                  in1->dims(),
+                  out0grad->dims(),
+                  getCPUPtr(in0),
+                  getCPUPtr(in1),
+                  getCPUPtr(out0grad),
+                  getCPUPtr(in0grad),
+                  getCPUPtr(in1grad));
 }
diff --git a/src/operator/ConstantOfShapeImpl.cpp b/src/operator/ConstantOfShapeImpl.cpp
index 16e4b762ba04e5f01bfccf965f6de3650fa2e734..1d41160b7738f4d8d8af103f25a0f3554f1e4442 100644
--- a/src/operator/ConstantOfShapeImpl.cpp
+++ b/src/operator/ConstantOfShapeImpl.cpp
@@ -13,15 +13,14 @@
 
 #include <functional>
 #include <memory>
-#include <vector>
+#include <stdexcept>   // std::runtime_error
 
 #include "aidge/backend/cpu/operator/ConstantOfShapeImpl_kernels.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/ConstantOfShape.hpp"
+#include "aidge/backend/OperatorImpl.hpp"  // Aidge::getBestMatch, Aidge::getRequiredSpec
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"
 
 template <>
 void Aidge::ConstantOfShapeImpl_cpu::forward() {
@@ -33,9 +32,7 @@ void Aidge::ConstantOfShapeImpl_cpu::forward() {
     const auto impl = Registrar<ConstantOfShapeImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    impl.forward(op_.getOutput(0)->dims(),
-             op_.value(), 
-             op_.getOutput(0)->getImpl()->rawPtr());
+    impl.forward(op_.getOutput(0), op_.value());
 }
 
 template <>
diff --git a/src/operator/PadImpl.cpp b/src/operator/PadImpl.cpp
index cdae21f8ed2757128f6a36b661b0897a4ba65f89..9a54437f445a1842b2f97555a0cbea8988acf50a 100644
--- a/src/operator/PadImpl.cpp
+++ b/src/operator/PadImpl.cpp
@@ -9,14 +9,14 @@
  *
  ********************************************************************************/
 
+#include <cstddef>
 #include <vector>
 
-#include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/operator/Conv.hpp"
-
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl_kernels.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
 
 Aidge::Elts_t Aidge::Pad_ProdConso_cpu::getNbRequiredProtected(Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(inputIdx == 0, "input index out of range."
diff --git a/src/operator/ReduceSumImpl.cpp b/src/operator/ReduceSumImpl.cpp
index aad0801835a74ecefb046f3dc64729ae1f8bd8bb..93a89a3436fb7c08489a54e94b991e4e36a0e5d4 100644
--- a/src/operator/ReduceSumImpl.cpp
+++ b/src/operator/ReduceSumImpl.cpp
@@ -12,11 +12,14 @@
 #include "aidge/backend/cpu/operator/ReduceSumImpl.hpp"
 
 #include <memory>
+#include <stdexcept>
 #include <vector>
 
-#include "aidge/utils/Types.h"
-#include "aidge/operator/ReduceSum.hpp"
 #include "aidge/backend/cpu/operator/ReduceSumImpl_kernels.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/ReduceSum.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
 
 template <>
 void Aidge::ReduceSumImpl_cpu::forward() {
diff --git a/unit_tests/operator/Test_AddImpl.cpp b/unit_tests/operator/Test_AddImpl.cpp
index bff9629be152163b2aa92bdc9d0c3029d7987b9b..4538b322a312457cf51e75a274724c5ab536f5ab 100644
--- a/unit_tests/operator/Test_AddImpl.cpp
+++ b/unit_tests/operator/Test_AddImpl.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <memory>
+#include <random>
 
 #include <catch2/catch_test_macros.hpp>
 
@@ -19,6 +20,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 
 using namespace Aidge;
 
@@ -139,4 +141,275 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
         Log::info("Expected Add_1 Tensor:\n{}", expectedOutput);
         REQUIRE(*op_1->getOutput(0) == expectedOutput);
     }
-}
\ No newline at end of file
+}
+
+TEST_CASE("[cpu/operator] Add(backward)", "[Add][CPU]") {
+    std::shared_ptr<Add_Op> op = std::make_shared<Add_Op>();
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // NOTE: the first four cases use fixed input values; the last one uses random values with fixed dimensions.
+
+    SECTION("Case 1: 1D and 2D Tensors") {
+        const auto T0 = std::make_shared<Tensor>(
+            Array2D<cpptype_t<DataType::Float32>, 2, 3>({{{1, 2, 3}, {4, 5, 6}}}));
+
+        const auto T1 =
+            std::make_shared<Tensor>(Array1D<cpptype_t<DataType::Float32>, 3>({0.1, 0.2, 0.3}));
+
+        op->associateInput(0, T0);
+        op->associateInput(1, T1);
+        op->getOutput(0)->setGrad(std::make_shared<Tensor>(
+            Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
+        op->forwardDims();
+
+        op->backward();
+
+        const Tensor expectedGrad0 =
+            Array2D<cpptype_t<DataType::Float32>, 2, 3>({{{1, 1, 1}, {1, 1, 1}}});
+
+        const Tensor expectedGrad1 = Array1D<cpptype_t<DataType::Float32>, 3>({2, 2, 2});
+
+
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
+    }
+
+    SECTION("Case 2: 3D and 1D tensors") {
+        const auto T0 = std::make_shared<Tensor>(Array3D<float, 2, 2, 3>(
+            {{{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}},
+              {{7.0, 8.0, 9.0}, {10.0, 11.0, 12.0}}}}));
+
+        const auto T1 =
+            std::make_shared<Tensor>(Array1D<float, 3>({0.3, 0.2, 0.1}));
+
+        const auto newGrad = std::make_shared<Tensor>(Array3D<float, 2, 2, 3>(
+            {{{{1, 1, 1}, {1, 1, 1}}, {{1, 1, 1}, {1, 1, 1}}}}));
+
+        const Tensor expectedGrad0 =
+            Array3D<float, 2, 2, 3>({{{{1, 1, 1}, {1, 1, 1}},
+                                      {{1, 1, 1}, {1, 1, 1}}}});
+
+        const Tensor expectedGrad1 = Array1D<cpptype_t<DataType::Float32>, 3>({4, 4, 4});
+
+        op->associateInput(0, T0);
+        op->associateInput(1, T1);
+        op->getOutput(0)->setGrad(newGrad);
+        op->forwardDims();
+        op->backward();
+
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
+    }
+
+    SECTION("Case 3: 4D and 2D tensors") {
+        const auto T0 = std::make_shared<Tensor>(Array4D<cpptype_t<DataType::Float32>, 2, 2, 3, 3>(
+            {{{{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}},
+               {{10.0, 11.0, 12.0}, {13.0, 14.0, 15.0}, {16.0, 17.0, 18.0}}},
+              {{{19.0, 20.0, 21.0}, {22.0, 23.0, 24.0}, {25.0, 26.0, 27.0}},
+               {{28.0, 29.0, 30.0},
+                {31.0, 32.0, 33.0},
+                {34.0, 35.0, 36.0}}}}}));
+
+        const auto T1 = std::make_shared<Tensor>(Array2D<cpptype_t<DataType::Float32>, 3, 3>(
+            {{{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}}}));
+
+        const auto newGrad =
+            std::make_shared<Tensor>(Array4D<cpptype_t<DataType::Float32>, 2, 2, 3, 3>(
+                {{{{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}},
+                   {{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}},
+                  {{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}},
+                   {{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}}}}));
+
+        const Tensor expectedGrad0 =
+            Array4D<cpptype_t<DataType::Float32>, 2, 2, 3, 3>(
+                {{{{{1, 1, 1}, {1, 1, 1}, {1, 1, 1}},
+                   {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}}},
+                  {{{1, 1, 1}, {1, 1, 1}, {1, 1, 1}},
+                   {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}}}}});
+
+        const Tensor expectedGrad1 =
+            Array2D<cpptype_t<DataType::Float32>, 3, 3>({{
+                                   {4.0, 4.0, 4.0},
+                                   {4.0, 4.0, 4.0},
+                                   {4.0, 4.0, 4.0}}});
+
+        op->associateInput(0, T0);
+        op->associateInput(1, T1);
+        op->getOutput(0)->setGrad(newGrad);
+        op->forwardDims();
+
+        op->backward();
+
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
+    }
+
+    SECTION("Case 4: 3D and 2D tensors") {
+        const auto T0 = std::make_shared<Tensor>(
+            Array3D<float, 2, 3, 4>({{{
+                                          {1.0, 2.0, 3.0, 4.0},
+                                          {5.0, 6.0, 7.0, 8.0},
+                                          {9.0, 10.0, 11.0, 12.0},
+                                      },
+                                      {
+                                          {13.0, 14.0, 15.0, 16.0},
+                                          {17.0, 18.0, 19.0, 20.0},
+                                          {21.0, 22.0, 23.0, 24.0},
+                                      }}}));
+
+        const auto T1 = std::make_shared<Tensor>(
+            Array2D<cpptype_t<DataType::Float32>, 3, 4>({{{0.1, 0.2, 0.3, 0.4},
+                                   {0.5, 0.6, 0.7, 0.8},
+                                   {0.9, 1.0, 1.1, 1.2}}}));
+
+        const auto newGrad = std::make_shared<Tensor>(
+            Array3D<cpptype_t<DataType::Float32>, 2, 3, 4>({{{
+                                          {1.0, 1.0, 1.0, 1.0},
+                                          {1.0, 1.0, 1.0, 1.0},
+                                          {1.0, 1.0, 1.0, 1.0},
+                                      },
+                                      {
+                                          {1.0, 1.0, 1.0, 1.0},
+                                          {1.0, 1.0, 1.0, 1.0},
+                                          {1.0, 1.0, 1.0, 1.0},
+                                      }}}));
+
+        const Tensor expectedGrad0 =
+            Array3D<cpptype_t<DataType::Float32>, 2, 3, 4>({{{{1, 1, 1, 1},
+                                       {1, 1, 1, 1},
+                                       {1, 1, 1, 1}},
+                                      {{1, 1, 1, 1},
+                                       {1, 1, 1, 1},
+                                       {1, 1, 1, 1}}}});
+
+        const Tensor expectedGrad1 =
+            Array2D<cpptype_t<DataType::Float32>, 3, 4>({{{2.0, 2.0, 2.0, 2.0},
+                                   {2.0, 2.0, 2.0, 2.0},
+                                   {2.0, 2.0, 2.0, 2.0}}});
+
+        op->associateInput(0, T0);
+        op->associateInput(1, T1);
+        op->getOutput(0)->setGrad(newGrad);
+        op->forwardDims();
+
+        op->backward();
+
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
+    }
+
+    SECTION("Case 5: Tensors with random values") {
+
+        // Broadcasting case: input 0 has a size-1 axis (axis 2), input 1 is missing the leading batch axis
+        const std::vector<std::size_t> dims0 = {5, 2, 1, 7};
+        const std::vector<std::size_t> dims1 = {2, 6, 7};
+        const std::vector<std::size_t> outputDims = {5, 2, 6, 7};
+
+        std::random_device rd;
+        std::mt19937 gen(rd());
+        std::uniform_real_distribution<float> dist(0.1f, 1.0f);
+
+        auto T0 = std::make_shared<Tensor>(dims0);
+        T0->setDataType(DataType::Float32);
+        T0->setBackend("cpu");
+        float* input0Data = static_cast<float*>(T0->getImpl()->rawPtr());
+        // Fill with random values
+        for (std::size_t i = 0; i < T0->size(); ++i) {
+            input0Data[i] = dist(gen);
+        }
+
+        auto T1 = std::make_shared<Tensor>(dims1);
+        T1->setDataType(DataType::Float32);
+        T1->setBackend("cpu");
+        float* input1Data = static_cast<float*>(T1->getImpl()->rawPtr());
+        // Fill with random values
+        for (std::size_t i = 0; i < T1->size(); ++i) {
+            input1Data[i] = dist(gen);
+        }
+
+        op->associateInput(0, T0);
+        op->associateInput(1, T1);
+
+        op->forwardDims();
+        op->forward();
+
+        Tensor expectedOutput{outputDims};
+        expectedOutput.setBackend("cpu");
+        float* expectedOutputData = static_cast<float*>(expectedOutput.getImpl()->rawPtr());
+
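+        // Row-major indexing over {5, 2, 6, 7}; input 0 reads index 0 on its broadcast axis, input 1 drops the batch axis.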
+        for (std::size_t n = 0; n < 5; ++n) {
+            for (std::size_t c = 0; c < 2; ++c) {
+                for (std::size_t h = 0; h < 6; ++h) {
+                    for (std::size_t w = 0; w < 7; ++w) {
+                        std::size_t outIdx = w + 7 * (h + 6 * (c + 2 * n));
+                        std::size_t in0Idx =
+                            w + 7 * (0 + 1 * (c + 2 * n)); // middle dim is 1
+                        std::size_t in1Idx =
+                            w + 7 * (h + 6 * c);           // no n dimension
+
+                        expectedOutputData[outIdx] = input0Data[in0Idx] + input1Data[in1Idx];
+                    }
+                }
+            }
+        }
+
+        auto outputTensor = op->getOutput(0);
+
+        REQUIRE(approxEq<float>(*outputTensor, expectedOutput));
+
+        // Backward pass
+        std::vector<float> gradOutputData(expectedOutput.size());
+        for (auto &val : gradOutputData) {
+            val = dist(gen);
+        }
+
+        op->getOutput(0)->setGrad(std::make_shared<Tensor>());
+        op->getOutput(0)->grad()->resize(outputDims);
+        op->getOutput(0)->grad()->setBackend("cpu");
+        op->getOutput(0)->grad()->setDataType(DataType::Float32);
+        op->getOutput(0)->grad()->getImpl()->setRawPtr(gradOutputData.data(),
+                                                       expectedOutput.size());
+
+        // Compute reference gradients
+        std::vector<float> expectedGrad0(T0->size(), 0.0f);
+        std::vector<float> expectedGrad1(T1->size(), 0.0f);
+
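+        // Reference gradients: each output-gradient element is accumulated into the input positions it was broadcast from.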
+        for (std::size_t n = 0; n < 5; ++n) {
+            for (std::size_t c = 0; c < 2; ++c) {
+                for (std::size_t h = 0; h < 6; ++h) {
+                    for (std::size_t w = 0; w < 7; ++w) {
+                        std::size_t outIdx = w + 7 * (h + 6 * (c + 2 * n));
+                        std::size_t in0Idx = w + 7 * (0 + 1 * (c + 2 * n));
+                        std::size_t in1Idx = w + 7 * (h + 6 * c);
+
+                        // Gradient for input0: just accumulate grad_output
+                        expectedGrad0[in0Idx] += gradOutputData[outIdx];
+
+                        // Gradient for input1: just accumulate grad_output
+                        expectedGrad1[in1Idx] += gradOutputData[outIdx];
+                    }
+                }
+            }
+        }
+
+        // Perform backward pass
+        op->backward();
+
+        auto expectedGrad0Tensor = std::make_shared<Tensor>();
+        expectedGrad0Tensor->resize(T0->dims());
+        expectedGrad0Tensor->setBackend("cpu");
+        expectedGrad0Tensor->setDataType(DataType::Float32);
+        expectedGrad0Tensor->getImpl()->setRawPtr(expectedGrad0.data(),
+                                                    expectedGrad0.size());
+
+        auto expectedGrad1Tensor = std::make_shared<Tensor>(T1->dims());
+        expectedGrad1Tensor->setBackend("cpu");
+        expectedGrad1Tensor->setDataType(DataType::Float32);
+        expectedGrad1Tensor->getImpl()->setRawPtr(expectedGrad1.data(),
+                                                    expectedGrad1.size());
+
+        // Verify backward pass
+        REQUIRE(approxEq<float>(*T0->grad(), *expectedGrad0Tensor));
+        REQUIRE(approxEq<float>(*T1->grad(), *expectedGrad1Tensor));
+    }
+}
diff --git a/unit_tests/operator/Test_ConstantOfShapeImpl.cpp b/unit_tests/operator/Test_ConstantOfShapeImpl.cpp
index 8ec1669b92a5116999413cf55a8c5113363ef330..6833d836e1d89996f4cebf430e51715f6bc8736d 100644
--- a/unit_tests/operator/Test_ConstantOfShapeImpl.cpp
+++ b/unit_tests/operator/Test_ConstantOfShapeImpl.cpp
@@ -27,89 +27,88 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/filler/Filler.hpp"
 #include "aidge/operator/ConstantOfShape.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/TensorUtils.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-TEST_CASE("[cpu/operator] ConstantOfShape", "[ConstantOfShape][CPU]") {
-  constexpr std::uint16_t NBTRIALS = 10;
-  // Create a random number generator
-  auto random_seed = Catch::Generators::Detail::getSeed;
-  std::mt19937 gen(random_seed());
-  std::uniform_real_distribution<float> valueDist(
-      0.1f, 1.1f); // Random float distribution between 0 and 1
-  std::uniform_int_distribution<DimSize_t> input_tensor_size_dist(
-      std::size_t(1), std::size_t(10));
-  std::uniform_int_distribution<int64_t> input_tensor_values_dist(
-      std::size_t(1), std::size_t(7));
-  std::uniform_real_distribution<double> operator_attr_value_dist(-100., 100.);
 
-  ///////////////////////////////////////////////
-  // SETUP FUNCTIONS
-  auto generate_input_tensor =
-      [&gen, &input_tensor_size_dist,
-       &input_tensor_values_dist]() -> std::shared_ptr<Tensor> {
-    std::vector<DimSize_t> input_dims;
-    input_dims.push_back(input_tensor_size_dist(gen));
+TEST_CASE("[cpu/operator] ConstantOfShape(forward)", "[ConstantOfShape][CPU][forward]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    auto random_seed = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(random_seed());
+    std::uniform_real_distribution<float> valueDist(
+            0.1f, 1.1f); // Random float distribution between 0 and 1
+    std::uniform_int_distribution<DimSize_t> input_tensor_size_dist(
+            std::size_t(1), std::size_t(10));
+    std::uniform_int_distribution<int64_t> input_tensor_values_dist(
+            std::size_t(1), std::size_t(7));
+    std::uniform_real_distribution<double> operator_attr_value_dist(-100., 100.);
 
-    auto result = std::make_shared<Tensor>(input_dims);
-    result->setDataType(DataType::Int64);
-    result->setBackend("cpu");
-    for (DimSize_t i = 0; i < result->size(); ++i) {
-      result->set<std::int64_t>(i, input_tensor_values_dist(gen));
-    }
-    return result;
-  };
+    ///////////////////////////////////////////////
+    // SETUP FUNCTIONS
+    auto generate_input_tensor =
+            [&gen, &input_tensor_size_dist,
+             &input_tensor_values_dist]() -> std::shared_ptr<Tensor> {
+        std::vector<DimSize_t> input_dims;
+        input_dims.push_back(input_tensor_size_dist(gen));
 
-  auto generate_random_operator =
-      [&gen,
-       &operator_attr_value_dist]() -> std::shared_ptr<ConstantOfShape_Op> {
-    auto node = ConstantOfShape(Tensor(operator_attr_value_dist(gen)));
-    auto op = std::static_pointer_cast<ConstantOfShape_Op>(node->getOperator());
-    op->setDataType(DataType::Float64);
-    op->setBackend("cpu");
-    return op;
-  };
+        auto result = std::make_shared<Tensor>(input_dims);
+        result->setDataType(DataType::Int64);
+        result->setBackend("cpu");
+        for (DimSize_t i = 0; i < result->size(); ++i) {
+            result->set<std::int64_t>(i, input_tensor_values_dist(gen));
+        }
+        return result;
+    };
 
-  auto generate_output_tensor = [](std::shared_ptr<Tensor> input_tensor,
-                                   std::shared_ptr<ConstantOfShape_Op> op) {
-    std::vector<DimSize_t> output_dims;
-    output_dims.reserve(input_tensor->size());
-    for (DimSize_t i = 0; i < input_tensor->size(); ++i) {
-      output_dims.push_back(input_tensor->get<int64_t>(i));
-    }
-    auto result = std::make_shared<Tensor>(output_dims);
-    result->setDataType(op->value().dataType());
-    result->setBackend("cpu");
-    constantFiller(result, op->value().get<double>(0));
-    return result;
-  };
+    auto generate_random_operator =
+            [&gen,
+             &operator_attr_value_dist]() -> std::shared_ptr<ConstantOfShape_Op> {
+        std::shared_ptr<ConstantOfShape_Op> op = std::make_shared<ConstantOfShape_Op>(Tensor(operator_attr_value_dist(gen)));
+        op->setDataType(DataType::Float64);
+        op->setBackend("cpu");
+        return op;
+    };
+
+    auto generate_output_tensor = [](std::shared_ptr<Tensor> input_tensor,
+                                      std::shared_ptr<ConstantOfShape_Op> op) {
+        std::vector<DimSize_t> output_dims;
+        output_dims.reserve(input_tensor->size());
+        for (DimSize_t i = 0; i < input_tensor->size(); ++i) {
+            output_dims.push_back(input_tensor->get<std::int64_t>(i));
+        }
+        auto result = std::make_shared<Tensor>(output_dims);
+        result->setDataType(op->value().dataType());
+        result->setBackend("cpu");
+        constantFiller(result, op->value().get<double>(0));
+        return result;
+    };
 
-  /////////////////////////////////////
-  // BENCHMARKING
-  std::chrono::time_point<std::chrono::system_clock> start;
-  std::chrono::time_point<std::chrono::system_clock> end;
-  std::chrono::duration<double, std::micro> duration{};
-  int number_of_operation{0};
+    /////////////////////////////////////
+    // BENCHMARKING
+    std::chrono::time_point<std::chrono::system_clock> start;
+    std::chrono::time_point<std::chrono::system_clock> end;
+    std::chrono::duration<double, std::micro> duration{};
+    int number_of_operation{0};
 
-  SECTION("ConstantOfShapeImpl_cpu::forward()") {
-    for (int i = 0; i < NBTRIALS; ++i) {
-      auto input_T = generate_input_tensor();
-      std::shared_ptr<ConstantOfShape_Op> op = generate_random_operator();
-      auto output_T = generate_output_tensor(input_T, op);
-      op->associateInput(0, input_T);
+    SECTION("ConstantOfShapeImpl_cpu::forward()") {
+        for (int i = 0; i < NBTRIALS; ++i) {
+            auto input_T = generate_input_tensor();
+            std::shared_ptr<ConstantOfShape_Op> op = generate_random_operator();
+            auto output_T = generate_output_tensor(input_T, op);
+            op->associateInput(0, input_T);
 
-      REQUIRE(op->forwardDims(true));
-      REQUIRE_NOTHROW(op->forward());
+            REQUIRE(op->forwardDims(true));
+            REQUIRE_NOTHROW(op->forward());
 
-      CHECK(output_T->nbDims() == op->getOutput(0)->nbDims());
-      for (DimIdx_t i = 0; i < output_T->nbDims(); ++i) {
-        CHECK(output_T->dims().at(i) == op->getOutput(0)->dims().at(i));
-      }
-      CHECK(approxEq<double>(*output_T, *op->getOutput(0)));
+            CHECK(output_T->nbDims() == op->getOutput(0)->nbDims());
+            for (DimIdx_t i = 0; i < output_T->nbDims(); ++i) {
+                CHECK(output_T->dims().at(i) == op->getOutput(0)->dims().at(i));
+            }
+            CHECK(approxEq<double>(*output_T, *op->getOutput(0)));
+        }
     }
-  }
 }
 } // namespace Aidge
 
diff --git a/unit_tests/recipies/Test_FoldConstantOfShape.cpp b/unit_tests/recipies/Test_FoldConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a1c09b151ebfdb0ab54d8430c13c3ca9d3de3459
--- /dev/null
+++ b/unit_tests/recipies/Test_FoldConstantOfShape.cpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+ #include "aidge/graph/GraphView.hpp"
+ #include "aidge/operator/Identity.hpp"
+ #include "aidge/recipes/Recipes.hpp"
+
+ #include <cstdint>  // std::int64_t
+ #include <memory>
+
+ #include <catch2/catch_test_macros.hpp>
+
+ #include "aidge/graph/OpArgs.hpp"
+ #include "aidge/operator/ConstantOfShape.hpp"
+ #include "aidge/operator/Conv.hpp"
+ #include "aidge/operator/Producer.hpp"
+ #include "aidge/operator/ReLU.hpp"
+ #include "aidge/recipes/Recipes.hpp"
+ #include "aidge/utils/ArrayHelpers.hpp"
+ #include "aidge/utils/Types.h"
+
+ namespace Aidge {
+
+ TEST_CASE("[cpu/recipes] foldConstantOfShape",
+           "[ConstantOfShape][foldConstantOfShape][recipes]") {
+   auto input_T = std::make_shared<Tensor>(Array1D<std::int64_t, 4>({1, 1, 3, 3}));
+
+   auto model = std::make_shared<GraphView>();
+   SECTION("Sequential model") {
+     model = Sequential({
+         Producer(input_T, "prod_0", true),
+         ConstantOfShape(3, "constantOfShape_0"),
+         Conv(1, 1, {3, 3}, "Conv_0"),
+         ReLU("ReLU_1")
+     });
+     // aidge_backend_cpu loaded. Recipe should work
+     REQUIRE(foldConstantOfShape(model) == 1);
+     CHECK(model->forwardDims());
+   }
+ }
+
+ }  // namespace Aidge