diff --git a/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp
index 7bf2ea9f1613372640611f6e436278350c9e299d..8944099ddbc7ccfaedfabec9d7295fe3ddcadbb0 100644
--- a/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp
@@ -136,7 +136,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
     B* biasesGrad   = static_cast<B*>(biasesGrad_);
 
 
-    Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Doing Bias grad");
     // bias grad
     if (biasesGrad == nullptr) { // no bias
         // Do nothing
@@ -151,7 +150,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
         }
     }
 
-    Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Doing Weight grad");
     // weight grad
     for (std::size_t o = 0; o < outputFeatureSize; ++o) {
         for (std::size_t c = 0; c < inputFeatureSize; ++c) {
@@ -163,7 +161,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
         }
     }
 
-    Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Doing Input grad");
     // input grad
     for (std::size_t b = 0; b < batchSize; ++b) {
         for (std::size_t c = 0; c < inputFeatureSize; ++c) {
diff --git a/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp b/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp
index 36acb9199c51e900287ca9b262322aa86287d838..1c71ca6399853aaec74acc070fb2d75b4d5d8aeb 100644
--- a/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp
@@ -194,6 +194,8 @@ void MulImpl_cpu_backward_kernel(const std::size_t input0Length,
         auto idx0 = getFlattenedIndex(broadcastedDims0, idxInput0);
         auto idx1 = getFlattenedIndex(broadcastedDims1, idxInput1);
 
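+        // Debug trace: one log line per output element (very verbose on large tensors).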
+        Log::info("Multiplication by {}", input1[idx1]);
         grad_input_0[idx0] += static_cast<I1>(grad_output[i] * input1[idx1]);
         grad_input_1[idx1] += static_cast<I2>(grad_output[i] * input0[idx0]);
     }
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index cff6128741db657136aca1006c0f273ce64aa87a..6800d7e0bcdf8e99060db953d855a207262b2e38 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -77,4 +77,11 @@ void Aidge::AddImpl_cpu::backward() {
                getCPUPtr(in0grad),
                getCPUPtr(in1grad));
 
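+    // Debug: dump the incoming output gradient and the two computed input gradients.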
+    Log::info("AddImpl_cpu::backward() : Gradient of output 0");
+    out0grad->print();
+    Log::info("AddImpl_cpu::backward() : Gradient of input 0");
+    in0grad->print();
+    Log::info("AddImpl_cpu::backward() : Gradient of input 1");
+    in1grad->print();
 }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 53502be1525d19ac105cd2afa046b68c75835a02..cef4dae2b46907158036ebd31848d169f5e74e85 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -55,7 +55,7 @@ void Aidge::FCImpl_cpu::forward()
 template <>
 void Aidge::FCImpl_cpu::backward()
 {
-    Log::notice("\t Call to Aidge::FCIMpl_cpu::backward()");
+    Log::info("\t Call to Aidge::FCIMpl_cpu::backward()");
     const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
     const auto& fc_grad = op_.getOutput(0)->grad();
     AIDGE_ASSERT(fc_grad, "missing ouput #0 gradient");
@@ -63,7 +63,6 @@ void Aidge::FCImpl_cpu::backward()
     AIDGE_ASSERT(op_.getInput(1)->grad(), "missing input #1 gradient");
 
     const auto impl = Registrar<FCImpl_cpu>::create(getBestMatch(getRequiredSpec()));
-    Log::notice("\t Call to Aidge::FCIMpl_cpu::backward() - Found impl in registrar");
 
     // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -76,7 +75,6 @@ void Aidge::FCImpl_cpu::backward()
 
     // Call kernel
     const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
-    Log::notice("\t Call to Aidge::FCIMpl_cpu::backward() - Retrieved grad pointers");
     impl.backward(batchSize,
         input1grad.dims()[1], // nb input features
         input1grad.dims()[0], // nb output features
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
index b8bdd161af25a87937b066fc2264ecf1a9816da0..7d640805b1498f77ea376d6d5aadf471e28d37b9 100644
--- a/src/operator/MulImpl.cpp
+++ b/src/operator/MulImpl.cpp
@@ -52,6 +52,11 @@ void Aidge::MulImpl_cpu::backward() {
 
     // Find the correct kernel type
     const auto impl = Registrar<MulImpl_cpu>::create(getBestMatch(getRequiredSpec()));
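+    // Debug: dump both operand tensors before calling the backward kernel.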
+    Log::info("Debug printing in 1 : ");
+    in1->print();
+    Log::info("Debug printing in 0 : ");
+    in0->print();
 
     // Call kernel
     impl.backward(/* input0Length */ in0grad->size(),
@@ -59,10 +64,10 @@ void Aidge::MulImpl_cpu::backward() {
                /* grad0Length  */ out0grad->size(),
                /* input0Dims   */ in0->dims(),
                /* input1Dims   */ in1->dims(),
-               out0grad->dims(),
-               getCPUPtr(in0),
-               getCPUPtr(in1),
-               getCPUPtr(out0grad),
+               /* outputDims   */ out0grad->dims(),
+               /* input0_      */ getCPUPtr(in0),
+               /* input1_      */ getCPUPtr(in1),
+               /* grad_output_ */ getCPUPtr(out0grad),
                getCPUPtr(in0grad),
                getCPUPtr(in1grad));
 
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 3601675206e5a86cf9c35316b5dd3daa102c59bd..c474394f88cd9aa6331d65f061ab0fc0278bda80 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -47,6 +47,7 @@ void Aidge::SubImpl_cpu::backward() {
 
     auto in0 = op_.getInput(0);
     auto in1 = op_.getInput(1);
+    auto out0 = op_.getOutput(0);
     auto in0grad = op_.getInput(0)->grad();
     auto in1grad = op_.getInput(1)->grad();
     auto out0grad = op_.getOutput(0)->grad();
@@ -58,6 +59,9 @@ void Aidge::SubImpl_cpu::backward() {
     in0grad->print();
     in1grad->print();
     out0grad->print();
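+    // Debug: also dump the forward output tensor itself, not just the gradients.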
+    Log::info("Sub Impl, pointer of out0 : {}" , static_cast<void*>(out0.get()));
+    out0->print();
 
     // Call kernel
     impl.backward(/* input0Length */ in0grad->size(),
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 4a4dc22dafe6cbb5a880a985df3c7690a67e77a9..fa4102405c815134d664989c993bee020c428d24 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -858,7 +858,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
     constexpr auto inChannels = 4;
     constexpr auto outChannels = 4;
 
-    constexpr auto beta = 1.0;
+    constexpr auto beta = 0.8;
     constexpr auto threshold = 1.0;
     constexpr auto nbTimeSteps = 2;
 
@@ -913,6 +913,17 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
     graph->compile("cpu", DataType::Float32);
     graph->forwardDims();
 
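+    // Debug: log the raw tensor addresses of the Leaky op's inputs and outputs
+    // (presumably to check tensor identity/aliasing through the meta-operator).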
+    Log::info("Lif 1 op tensors : \
+            \n Input 0  : {} \
+            \n Input 1  : {} \
+            \n Output 0 : {} \
+            \n Output 1 : {}",
+            static_cast<void*>(lif1Op->getInput(0).get()),
+            static_cast<void*>(lif1Op->getInput(1).get()),
+            static_cast<void*>(lif1Op->getOutput(0).get()),
+            static_cast<void*>(lif1Op->getOutput(1).get())
+        );
+
     auto scheduler = SequentialScheduler(graph);
     REQUIRE_NOTHROW(scheduler.generateScheduling());
     REQUIRE_NOTHROW(scheduler.forward(true));