diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index e2ebf44616db876b462157db650ff48362dd7bac..f8abfcf2a3f8c10ad1fe679738072684a47d6930 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -30,7 +30,7 @@ class ReLUImplForward_cpu
     : public Registrable<ReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
 };
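+// Backward kernels are registered on the {input, output, grad_input, grad_output}
+// data types and invoked as kernel(size, input, output, grad_output, grad_input).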
 class ReLUImplBackward_cpu
-    : public Registrable<ReLUImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<ReLUImplBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, const void*, void*)> {
 };
 
 class ReLUImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
index 43a9714ad2d32228fac9bf9c526191f0cec5bfa0..4c4aac31b1cbf6f40b9cfee2c32686d3983bc233 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
@@ -18,28 +18,29 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-template <class O, class GI, class GO>
+template <class I, class O, class GI, class GO>
 void ReLUImpl_cpu_backward_kernel(const std::size_t inputLenght,
-                                  const void* output_, const void* grad_output_,
-                                  void* grad_input_) {
+                                  const void* input_, const void* output_, const void* grad_output_,
+                                  void* grad_input_) {
+    const I* input = static_cast<const I*>(input_);
     const O* output = static_cast<const O*>(output_);
     const GO* grad_output = static_cast<const GO*>(grad_output_);
     GI* grad_input = static_cast<GI*>(grad_input_);
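+    // ReLU gradient: dL/dx = dL/dy when x > 0, 0 otherwise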
     for (std::size_t i = 0; i < inputLenght; ++i) {
-        grad_input[i] = (output[i] > GO(0)) ? GI(grad_output[i]) : GI(0);
+        grad_input[i] = (input[i] > I(0)) ? static_cast<GI>(grad_output[i]) : GI(0);
     }
 }
 
 namespace {
 static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float32(
-    {DataType::Float32, DataType::Float32, DataType::Float32},
-    Aidge::ReLUImpl_cpu_backward_kernel<float, float, float>);
+    {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
+    Aidge::ReLUImpl_cpu_backward_kernel<float, float, float, float>);
 static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Int32(
-    {DataType::Int32, DataType::Int32, DataType::Int32},
-    Aidge::ReLUImpl_cpu_backward_kernel<int, int, int>);
+    {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
+    Aidge::ReLUImpl_cpu_backward_kernel<int, int, int, int>);
 static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float64(
-    {DataType::Float64, DataType::Float64, DataType::Float64},
-    Aidge::ReLUImpl_cpu_backward_kernel<double, double, double>);
+    {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
+    Aidge::ReLUImpl_cpu_backward_kernel<double, double, double, double>);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
index aa533786d3ce5b6f5cd501b6ba74b1be2823d407..af9c65590c7182185c9d79669dde49e592cbeb5d 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
@@ -27,7 +27,7 @@ void ReLUImpl_cpu_forward_kernel(std::size_t inputLenght,
 
 //#pragma omp parallel for if (inputLenght > 1024)
     for (std::size_t i = 0; i < inputLenght; ++i) {
-        output[i] = input[i] > 0 ? input[i] : 0;
+        output[i] = (input[i] > 0) ? input[i] : 0;
     }
 }
 
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
index 2e43023d678c8a4258c80fb91d82d2858fcdf188..ed9ffe138ae0430d976dc2ca4188bac950dc13e5 100644
--- a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
@@ -28,7 +28,7 @@ class SigmoidImplForward_cpu
     : public Registrable<SigmoidImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
 };
 class SigmoidImplBackward_cpu
-    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, const void*, void*)> {
 };
 
 class SigmoidImpl_cpu : public OperatorImpl {
@@ -40,7 +40,10 @@ public:
     }
 
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+    void forward() override final;
+
+    void backward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp
new file mode 100755
index 0000000000000000000000000000000000000000..fa73644fda53c2678a23f09a111383e57c301f76
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SIGMOIDIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SIGMOIDIMPL_BACKWARD_KERNEL_H_
+
+#include <cstddef>  // std::size_t
+
+#include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace Aidge {
+template <class I, class O, class GI, class GO>
+void SigmoidImpl_cpu_backward_kernel(const std::size_t inputLenght,
+                                     const void* input_, const void* output_, const void* grad_output_,
+                                     void* grad_input_) {
+    // Note: input_ is unused, the Sigmoid gradient depends only on the output
+    const O* output = static_cast<const O*>(output_);
+    const GO* grad_output = static_cast<const GO*>(grad_output_);
+    GI* grad_input = static_cast<GI*>(grad_input_);
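+    // Sigmoid gradient: dL/dx = y * (1 - y) * dL/dy, with y = sigmoid(x)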
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        grad_input[i] = output[i] * (O(1) - output[i]) * grad_output[i];
+    }
+}
+
+namespace {
+static Registrar<SigmoidImplBackward_cpu> registrarSigmoidImplBackward_cpu_Float32(
+    {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
+    Aidge::SigmoidImpl_cpu_backward_kernel<float, float, float, float>);
+static Registrar<SigmoidImplBackward_cpu> registrarSigmoidImplBackward_cpu_Float64(
+    {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
+    Aidge::SigmoidImpl_cpu_backward_kernel<double, double, double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SIGMOIDIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp
index a53650942540e6368855ffe19e2f7f651ab5b6bc..510bf9bb46523d4490cb3f4a53f1d951abb56882 100644
--- a/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp
@@ -19,15 +19,15 @@
 namespace Aidge {
 template <class I, class O>
 void SigmoidImpl_cpu_forward_kernel(std::size_t inputLenght,
-                                     const void* input_,
-                                     void* output_) {
+                                    const void* input_,
+                                    void* output_) {
 
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
 //#pragma omp parallel for if (inputLenght > 1024)
     for (std::size_t i = 0; i < inputLenght; ++i) {
-        output[i] = static_cast<O>(1.0) / (static_cast<O>(1.0) + std::exp(-input[i]));
+        output[i] = O(1) / (O(1) + std::exp(-input[i]));
     }
 }
 
diff --git a/include/aidge/backend/cpu/operator/TanhImpl.hpp b/include/aidge/backend/cpu/operator/TanhImpl.hpp
index 9e44f7bcd2b2392c634421478a096258b3e39795..a62cd0501eae6d7b755a77aa9e9f7c8430f04e6b 100644
--- a/include/aidge/backend/cpu/operator/TanhImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TanhImpl.hpp
@@ -28,7 +28,7 @@ class TanhImplForward_cpu
     : public Registrable<TanhImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
 };
 class TanhImplBackward_cpu
-    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, const void*, void*)> {
 };
 
 class TanhImpl_cpu : public OperatorImpl {
@@ -40,7 +40,10 @@ public:
     }
 
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    void forward() override;
+    void forward() override final;
+
+    void backward() override final;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp
new file mode 100755
index 0000000000000000000000000000000000000000..d07bf12b3771528d4015917ce1b7682af70bdab9
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_TANHIMPL_BACKWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_TANHIMPL_BACKWARD_KERNEL_H_
+
+#include <cstddef>  // std::size_t
+
+#include "aidge/backend/cpu/operator/TanhImpl.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace Aidge {
+template <class I, class O, class GI, class GO>
+void TanhImpl_cpu_backward_kernel(const std::size_t inputLenght,
+                                  const void* input_, const void* output_, const void* grad_output_,
+                                  void* grad_input_) {
+    // Note: input_ is unused, the Tanh gradient depends only on the output
+    const O* output = static_cast<const O*>(output_);
+    const GO* grad_output = static_cast<const GO*>(grad_output_);
+    GI* grad_input = static_cast<GI*>(grad_input_);
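+    // Tanh gradient: dL/dx = (1 - y^2) * dL/dy, with y = tanh(x)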
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        grad_input[i] = (O(1) - output[i] * output[i]) * grad_output[i];
+    }
+}
+
+namespace {
+static Registrar<TanhImplBackward_cpu> registrarTanhImplBackward_cpu_Float32(
+    {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
+    Aidge::TanhImpl_cpu_backward_kernel<float, float, float, float>);
+static Registrar<TanhImplBackward_cpu> registrarTanhImplBackward_cpu_Float64(
+    {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
+    Aidge::TanhImpl_cpu_backward_kernel<double, double, double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_TANHIMPL_BACKWARD_KERNEL_H_ */
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 06859f09db169946175a93140e04f2e2a99e3362..8de2190e3249373d6f138be8ae649ff7f4d19692 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -28,13 +28,15 @@ Aidge::Elts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t
 }
 
 void Aidge::ReLUImpl_cpu::forward() {
-    std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
+    const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
+    std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
     AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ReLUImplForward_cpu>::create({
         in0->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        out0->dataType()});
 
     // Call kernel
     kernelFunc(in0->size(),
@@ -43,20 +45,21 @@ void Aidge::ReLUImpl_cpu::forward() {
 }
 
 void Aidge::ReLUImpl_cpu::backward() {
-    // reversing in and out Tensors
-        const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
+    const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
+    std::shared_ptr<Tensor> in0  = op_.getInput(0);
+    AIDGE_ASSERT(in0, "missing input #0 for current {} operator", op_.type());
     std::shared_ptr<Tensor> out0  = op_.getOutput(0);
-    std::shared_ptr<Tensor> gra_out0 = op_.getOutput(0)->grad();
     std::shared_ptr<Tensor> gra_int0 = op_.getInput(0)->grad();
-    AIDGE_ASSERT(out0, "current {} operator output#0 has not gradient Tensor.", op_.type());
+    AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
+    std::shared_ptr<Tensor> gra_out0 = out0->grad();
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ReLUImplBackward_cpu>::create({
+        in0->dataType(),
         out0->dataType(),
-        gra_out0->dataType(),
-        gra_int0->dataType()
+        gra_int0->dataType(),
+        gra_out0->dataType()
     });
 
     // Call kernel
-    kernelFunc(gra_int0->size(), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
 }
diff --git a/src/operator/SigmoidImpl.cpp b/src/operator/SigmoidImpl.cpp
index dd7ec26cb36777f79d382c815b60d2381544a0bd..fe92ef43d0a526fcf711b8b8954b978b85437e85 100644
--- a/src/operator/SigmoidImpl.cpp
+++ b/src/operator/SigmoidImpl.cpp
@@ -21,6 +21,7 @@
 
 #include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
 #include "aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp"
 
 Aidge::Elts_t Aidge::SigmoidImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -28,15 +29,38 @@ Aidge::Elts_t Aidge::SigmoidImpl_cpu::getNbRequiredProtected(const Aidge::IOInde
 }
 
 void Aidge::SigmoidImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    const Sigmoid_Op& op_ = dynamic_cast<const Sigmoid_Op&>(mOp);
+    std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
+    AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<SigmoidImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        in0->dataType(),
+        out0->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+    kernelFunc(in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+void Aidge::SigmoidImpl_cpu::backward() {
+    const Sigmoid_Op& op_ = dynamic_cast<const Sigmoid_Op&>(mOp);
+    std::shared_ptr<Tensor> in0  = op_.getInput(0);
+    std::shared_ptr<Tensor> out0  = op_.getOutput(0);
+    AIDGE_ASSERT(in0, "missing input #0 for current {} operator", op_.type());
+    AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
+    std::shared_ptr<Tensor> gra_int0 = in0->grad();
+    std::shared_ptr<Tensor> gra_out0 = out0->grad();
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<SigmoidImplBackward_cpu>::create({
+        in0->dataType(),
+        out0->dataType(),
+        gra_int0->dataType(),
+        gra_out0->dataType()
+    });
+
+    // Call kernel
+    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+}
diff --git a/src/operator/TanhImpl.cpp b/src/operator/TanhImpl.cpp
index 44e180739ed86e25d4be6d0beb693f73bdadbf35..8b5988e976534a701eb91782e5098ceeeb2a95ff 100644
--- a/src/operator/TanhImpl.cpp
+++ b/src/operator/TanhImpl.cpp
@@ -21,6 +21,7 @@
 
 #include "aidge/backend/cpu/operator/TanhImpl.hpp"
 #include "aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp"
 
 Aidge::Elts_t Aidge::TanhImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
@@ -28,15 +29,39 @@ Aidge::Elts_t Aidge::TanhImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t
 }
 
 void Aidge::TanhImpl_cpu::forward() {
-    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+    const Tanh_Op& op_ = dynamic_cast<const Tanh_Op&>(mOp);
+    std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
+    AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<TanhImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+        in0->dataType(),
+        out0->dataType()});
 
     // Call kernel
-    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+    kernelFunc(in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+void Aidge::TanhImpl_cpu::backward() {
+    const Tanh_Op& op_ = dynamic_cast<const Tanh_Op&>(mOp);
+    std::shared_ptr<Tensor> in0  = op_.getInput(0);
+    std::shared_ptr<Tensor> out0  = op_.getOutput(0);
+    AIDGE_ASSERT(in0, "missing input #0 for current {} operator", op_.type());
+    AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
+    std::shared_ptr<Tensor> gra_int0 = in0->grad();
+    std::shared_ptr<Tensor> gra_out0 = out0->grad();
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<TanhImplBackward_cpu>::create({
+        in0->dataType(),
+        out0->dataType(),
+        gra_int0->dataType(),
+        gra_out0->dataType()
+    });
+
+    // Call kernel
+    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+}