diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index f8abfcf2a3f8c10ad1fe679738072684a47d6930..e2ebf44616db876b462157db650ff48362dd7bac 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -30,7 +30,7 @@ class ReLUImplForward_cpu
     : public Registrable<ReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
 };
 class ReLUImplBackward_cpu
-    : public Registrable<ReLUImplBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, const void*, void*)> {
+    : public Registrable<ReLUImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
 };
 
 class ReLUImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
index e67a4588fcdd238bd149468ae735492dde133419..1bd932e43608d98f737cc9046aed74b2fec6abc6 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
@@ -18,12 +18,11 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-template <class I, class O, class GI, class GO>
+template <class I, class GI, class GO>
 void ReLUImpl_cpu_backward_kernel(const std::size_t inputLenght,
-                                  const void* input_, const void* output_, const void* grad_output_,
-								  void* grad_input_) {
+                                  const void* input_, const void* grad_output_,
+                                  void* grad_input_) {
     const I* input = static_cast<const I*>(input_);
-    //const O* output = static_cast<const O*>(output_);
     const GO* grad_output = static_cast<const GO*>(grad_output_);
     GI* grad_input = static_cast<GI*>(grad_input_);
     for (std::size_t i = 0; i < inputLenght; ++i) {
@@ -33,14 +32,14 @@ void ReLUImpl_cpu_backward_kernel(const std::size_t inputLenght,
 
 namespace {
 static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float32(
-    {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-    Aidge::ReLUImpl_cpu_backward_kernel<float, float, float, float>);
+    {DataType::Float32, DataType::Float32, DataType::Float32},
+    Aidge::ReLUImpl_cpu_backward_kernel<float, float, float>);
 static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Int32(
-    {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
-    Aidge::ReLUImpl_cpu_backward_kernel<int, int, int, int>);
+    {DataType::Int32, DataType::Int32, DataType::Int32},
+    Aidge::ReLUImpl_cpu_backward_kernel<int, int, int>);
 static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float64(
-    {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-    Aidge::ReLUImpl_cpu_backward_kernel<double, double, double, double>);
+    {DataType::Float64, DataType::Float64, DataType::Float64},
+    Aidge::ReLUImpl_cpu_backward_kernel<double, double, double>);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
index ed9ffe138ae0430d976dc2ca4188bac950dc13e5..34340e6166a48b465c7723e85d91c195bfb42277 100644
--- a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
@@ -28,7 +28,7 @@ class SigmoidImplForward_cpu
     : public Registrable<SigmoidImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
 };
 class SigmoidImplBackward_cpu
-    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, const void*, void*)> {
+    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
 };
 
 class SigmoidImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp
index 931a2e4df129de2d85bad54cae009158fcffce9e..4ceb3bd7ed9a3fb739591eee488f8035770fef18 100644
--- a/include/aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp
@@ -18,11 +18,10 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-template <class I, class O, class GI, class GO>
+template <class O, class GI, class GO>
 void SigmoidImpl_cpu_backward_kernel(const std::size_t inputLenght,
-                                     const void* input_, const void* output_, const void* grad_output_,
-								     void* grad_input_) {
-    //const I* input = static_cast<const I*>(input_);
+                                     const void* output_, const void* grad_output_,
+                                     void* grad_input_) {
     const O* output = static_cast<const O*>(output_);
     const GO* grad_output = static_cast<const GO*>(grad_output_);
     GI* grad_input = static_cast<GI*>(grad_input_);
@@ -33,11 +32,11 @@ void SigmoidImpl_cpu_backward_kernel(const std::size_t inputLenght,
 
 namespace {
 static Registrar<SigmoidImplBackward_cpu> registrarSigmoidImplBackward_cpu_Float32(
-    {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-    Aidge::SigmoidImpl_cpu_backward_kernel<float, float, float, float>);
+    {DataType::Float32, DataType::Float32, DataType::Float32},
+    Aidge::SigmoidImpl_cpu_backward_kernel<float, float, float>);
 static Registrar<SigmoidImplBackward_cpu> registrarSigmoidImplBackward_cpu_Float64(
-    {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-    Aidge::SigmoidImpl_cpu_backward_kernel<double, double, double, double>);
+    {DataType::Float64, DataType::Float64, DataType::Float64},
+    Aidge::SigmoidImpl_cpu_backward_kernel<double, double, double>);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/TanhImpl.hpp b/include/aidge/backend/cpu/operator/TanhImpl.hpp
index a62cd0501eae6d7b755a77aa9e9f7c8430f04e6b..0bf851e77d94c160c0362301df33d682347daf0c 100644
--- a/include/aidge/backend/cpu/operator/TanhImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TanhImpl.hpp
@@ -28,7 +28,7 @@ class TanhImplForward_cpu
     : public Registrable<TanhImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
 };
 class TanhImplBackward_cpu
-    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, const void*, void*)> {
+    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
 };
 
 class TanhImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp
index 3f49e4c4559506a7eff06a0a54eb4eacd6178643..3a13c2cad21c35822fc6248590550e4716ee046d 100644
--- a/include/aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp
@@ -18,11 +18,10 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-template <class I, class O, class GI, class GO>
+template <class O, class GI, class GO>
 void TanhImpl_cpu_backward_kernel(const std::size_t inputLenght,
-                                  const void* input_, const void* output_, const void* grad_output_,
-			  			          void* grad_input_) {
-    //const I* input = static_cast<const I*>(input_);
+                                  const void* output_, const void* grad_output_,
+                                  void* grad_input_) {
     const O* output = static_cast<const O*>(output_);
     const GO* grad_output = static_cast<const GO*>(grad_output_);
     GI* grad_input = static_cast<GI*>(grad_input_);
@@ -33,11 +32,11 @@ void TanhImpl_cpu_backward_kernel(const std::size_t inputLenght,
 
 namespace {
 static Registrar<TanhImplBackward_cpu> registrarTanhImplBackward_cpu_Float32(
-    {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-    Aidge::TanhImpl_cpu_backward_kernel<float, float, float, float>);
+    {DataType::Float32, DataType::Float32, DataType::Float32},
+    Aidge::TanhImpl_cpu_backward_kernel<float, float, float>);
 static Registrar<TanhImplBackward_cpu> registrarTanhImplBackward_cpu_Float64(
-    {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-    Aidge::TanhImpl_cpu_backward_kernel<double, double, double, double>);
+    {DataType::Float64, DataType::Float64, DataType::Float64},
+    Aidge::TanhImpl_cpu_backward_kernel<double, double, double>);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 8de2190e3249373d6f138be8ae649ff7f4d19692..4a0fb9f5d929e2ce731a21b5553e1b9257a32daa 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -46,20 +46,19 @@ void Aidge::ReLUImpl_cpu::forward() {
 
 void Aidge::ReLUImpl_cpu::backward() {
     const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
-	std::shared_ptr<Tensor> in0  = op_.getInput(0);
+    std::shared_ptr<Tensor> in0  = op_.getInput(0);
     std::shared_ptr<Tensor> out0  = op_.getOutput(0);
     std::shared_ptr<Tensor> gra_int0 = op_.getInput(0)->grad();
-	std::shared_ptr<Tensor> gra_out0 = op_.getOutput(0)->grad();    
+    std::shared_ptr<Tensor> gra_out0 = op_.getOutput(0)->grad();
     AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<ReLUImplBackward_cpu>::create({
-		in0->dataType(),
-        out0->dataType(),
+        in0->dataType(),
         gra_int0->dataType(),
-		gra_out0->dataType()
+        gra_out0->dataType()
     });
 
     // Call kernel
-    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
 }
diff --git a/src/operator/SigmoidImpl.cpp b/src/operator/SigmoidImpl.cpp
index fe92ef43d0a526fcf711b8b8954b978b85437e85..ad69935c02e392d7aa1c9601acb827c5baf8970f 100644
--- a/src/operator/SigmoidImpl.cpp
+++ b/src/operator/SigmoidImpl.cpp
@@ -47,20 +47,18 @@ void Aidge::SigmoidImpl_cpu::forward() {
 
 void Aidge::SigmoidImpl_cpu::backward() {
     const Sigmoid_Op& op_ = dynamic_cast<const Sigmoid_Op&>(mOp);
-	std::shared_ptr<Tensor> in0  = op_.getInput(0);
     std::shared_ptr<Tensor> out0  = op_.getOutput(0);
-    std::shared_ptr<Tensor> gra_int0 = op_.getInput(0)->grad();		
-	std::shared_ptr<Tensor> gra_out0 = op_.getOutput(0)->grad();    
+    std::shared_ptr<Tensor> gra_int0 = op_.getInput(0)->grad();
+    std::shared_ptr<Tensor> gra_out0 = op_.getOutput(0)->grad();
     AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<SigmoidImplBackward_cpu>::create({
-		in0->dataType(),
         out0->dataType(),
-		gra_int0->dataType(),
+        gra_int0->dataType(),
         gra_out0->dataType()        
     });
 
     // Call kernel
-    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
-}
\ No newline at end of file
+    kernelFunc(gra_int0->size(), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+}
diff --git a/src/operator/TanhImpl.cpp b/src/operator/TanhImpl.cpp
index 8b5988e976534a701eb91782e5098ceeeb2a95ff..a2469ed9b83679c0edf8d0a761abf9d3d046db6e 100644
--- a/src/operator/TanhImpl.cpp
+++ b/src/operator/TanhImpl.cpp
@@ -47,21 +47,19 @@ void Aidge::TanhImpl_cpu::forward() {
 
 void Aidge::TanhImpl_cpu::backward() {
     const Tanh_Op& op_ = dynamic_cast<const Tanh_Op&>(mOp);
-	std::shared_ptr<Tensor> in0  = op_.getInput(0);
     std::shared_ptr<Tensor> out0  = op_.getOutput(0);
     std::shared_ptr<Tensor> gra_int0 = op_.getInput(0)->grad();		
-	std::shared_ptr<Tensor> gra_out0 = op_.getOutput(0)->grad();    
+    std::shared_ptr<Tensor> gra_out0 = op_.getOutput(0)->grad();
     AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<TanhImplBackward_cpu>::create({
-		in0->dataType(),
         out0->dataType(),
-		gra_int0->dataType(),
+        gra_int0->dataType(),
         gra_out0->dataType()        
     });
 
     // Call kernel
-    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+    kernelFunc(gra_int0->size(), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
 }