diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index ca04dff9164ecc8492d9263b32b60272dbbad395..cfb85ecfa6a4c65d89079dc23944d6d85d99a785 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -33,8 +33,6 @@ using AddImpl_cpu = OperatorImpl_cpu<Add_Op,
          const std::vector<std::size_t>&, 
          const std::vector<std::size_t>&, 
          const void*, 
-         const void*, 
-         const void*, 
          void*, 
          void*)
 >;
diff --git a/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
index d6fff9b58d381895350998e11bae01684718ad3e..4be47849db2fd5ee4e21d59a4f1199f13f60b3a9 100644
--- a/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
@@ -154,15 +154,10 @@ void AddImpl_cpu_backward_kernel(const std::size_t input0Length,
                                const std::vector<std::size_t>& dims0,
                                const std::vector<std::size_t>& dims1,
                                const std::vector<std::size_t>& outputDims,
-                               const void* input0_,
-                               const void* input1_,
                                const void* grad_output_,
                                void* gradientInput0_,
                                void* gradientInput1_)
 {
-    // TODO: Remove input0/1 from the function
-    const I* input0 = static_cast<const I*>(input0_);
-    const I* input1 = static_cast<const I*>(input1_);
     const O* gradOutput = static_cast<const O*>(grad_output_);
     auto* gradInput0 = static_cast<I*>(gradientInput0_);
     auto* gradInput1 = static_cast<I*>(gradientInput1_);
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index b027fb876c8e597c85d13fea0f1fb6dd1207a1d9..cff6128741db657136aca1006c0f273ce64aa87a 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -73,8 +73,6 @@ void Aidge::AddImpl_cpu::backward() {
                in0->dims(),
                in1->dims(),
                out0grad->dims(),
-               getCPUPtr(in0),
-               getCPUPtr(in1),
                getCPUPtr(out0grad),
                getCPUPtr(in0grad),
                getCPUPtr(in1grad));
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 4fe396303c8cfae88c885c62b4479ee9c52fb54a..da1a4873136241cddea351996331c489f7476bdd 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -705,7 +705,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         auto fc2 = FC(outChannels, inChannels, true, "fc2");
         // NOTE: Account for init step by adding 1 to the max timestep
         // parameter.
-        auto lif1 = Leaky(nbTimeSteps + 1, beta, threshold, "leaky");
+        auto lif1 = Leaky(nbTimeSteps + 1, beta, threshold, LeakyReset::Subtraction, "leaky");
 
         // associateInput() does not work
         fc1->input(1).first->getOperator()->setOutput(0, myWeights);
@@ -774,7 +774,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         const auto nbTimeSteps = dims[0];
         const auto beta = betaDist(gen);
 
-        auto myLeaky = Leaky(nbTimeSteps, beta, 1.0, "leaky");
+        auto myLeaky = Leaky(nbTimeSteps, beta, 1.0, LeakyReset::Subtraction, "leaky");
         auto op =
             std::static_pointer_cast<MetaOperator_Op>(myLeaky->getOperator());
         // auto stack = Stack(2);