diff --git a/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp
index ab114dda2f617b73ae9213daffd551c2dfac0f31..0d2d9e450142a91173381937d31e1ccecf2c779c 100644
--- a/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp
@@ -126,7 +126,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
                                 void* weightGrad_,
                                 void* biasesGrad_)
 {
-    Log::info("\t\t Call to FCImpl_cpu_backward_kernel");
     // FIXME: missing FC attributes as arguments
     const I* input  = static_cast<const I*>(input_);
     const I* originalInput  = static_cast<const I*>(originalInput_);
@@ -136,7 +135,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
     B* biasesGrad   = static_cast<B*>(biasesGrad_);
 
 
-    Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Doing Bias grad");
     // bias grad
     if (biasesGrad == nullptr) { // no bias
         // Do nothing
@@ -151,7 +149,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
         }
     }
 
-    Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Doing Weight grad");
     // weight grad
     for (std::size_t o = 0; o < outputFeatureSize; ++o) {
         for (std::size_t c = 0; c < inputFeatureSize; ++c) {
@@ -163,7 +160,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
         }
     }
 
-    Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Doing Input grad");
     // input grad
     for (std::size_t b = 0; b < batchSize; ++b) {
         for (std::size_t c = 0; c < inputFeatureSize; ++c) {
@@ -174,8 +170,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
             output[b*inputFeatureSize + c]+= sum;
         }
     }
-
-    Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Done");
 }
 
 // Kernels registration to implementation entry point
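
For reference, here is a self-contained sketch of the math the kernel above implements, with the Aidge template parameters replaced by plain floats. Layout assumptions (matching the loop indices visible in the hunks): row-major input [batch, in], weight [out, in], output gradient [batch, out]; the names are illustrative, not the kernel's actual signature.

#include <cstddef>

void fcBackwardSketch(std::size_t batch, std::size_t in, std::size_t out,
                      const float* gradOut,   // [batch, out] incoming gradient
                      const float* input,     // [batch, in]  forward input
                      const float* weight,    // [out, in]    forward weight
                      float* gradInput,       // [batch, in]
                      float* gradWeight,      // [out, in]
                      float* gradBias) {      // [out], nullptr when FC has no bias
    // Bias grad: dL/db[o] = sum over the batch of gradOut[b,o].
    if (gradBias != nullptr) {
        for (std::size_t o = 0; o < out; ++o) {
            float sum = 0.0f;
            for (std::size_t b = 0; b < batch; ++b)
                sum += gradOut[b * out + o];
            gradBias[o] = sum;
        }
    }
    // Weight grad: dL/dW[o,c] = sum over the batch of gradOut[b,o] * input[b,c].
    for (std::size_t o = 0; o < out; ++o)
        for (std::size_t c = 0; c < in; ++c) {
            float sum = 0.0f;
            for (std::size_t b = 0; b < batch; ++b)
                sum += gradOut[b * out + o] * input[b * in + c];
            gradWeight[o * in + c] = sum;
        }
    // Input grad: dL/dx[b,c] = sum over outputs of gradOut[b,o] * W[o,c].
    for (std::size_t b = 0; b < batch; ++b)
        for (std::size_t c = 0; c < in; ++c) {
            float sum = 0.0f;
            for (std::size_t o = 0; o < out; ++o)
                sum += gradOut[b * out + o] * weight[o * in + c];
            gradInput[b * in + c] += sum;   // accumulated, matching the += above
        }
}
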
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index cff6128741db657136aca1006c0f273ce64aa87a..6fb0ab3d8762a03e840f157cb9a90d9bad177891 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -77,4 +77,8 @@ void Aidge::AddImpl_cpu::backward() {
                getCPUPtr(in0grad),
                getCPUPtr(in1grad));
 
+    Log::info(" ");
+    Log::info("(AddImpl.cpp) Gradient of output 0 : {}", out0grad->toString());
+    Log::info("(AddImpl.cpp) Gradient of input  0 : {}", in0grad->toString());
+    Log::info("(AddImpl.cpp) Gradient of input  1 : {}", in1grad->toString());
 }
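
A note on what these logs should show: Add backward, in the same-shape case, just passes the output gradient through to both inputs. Minimal sketch with illustrative names; if the inputs were broadcast together, the gradient would additionally have to be summed over the broadcast dimensions, which is omitted here:

#include <cstddef>

void addBackwardSketch(const float* gradOut, float* gradIn0, float* gradIn1,
                       std::size_t length) {
    for (std::size_t i = 0; i < length; ++i) {
        gradIn0[i] += gradOut[i];   // d(a+b)/da = 1
        gradIn1[i] += gradOut[i];   // d(a+b)/db = 1
    }
}
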
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 53502be1525d19ac105cd2afa046b68c75835a02..841d3aee1f2a1b922441baf4c72387ff46073105 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -55,7 +55,6 @@ void Aidge::FCImpl_cpu::forward()
 template <>
 void Aidge::FCImpl_cpu::backward()
 {
-    Log::notice("\t Call to Aidge::FCIMpl_cpu::backward()");
     const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
     const auto& fc_grad = op_.getOutput(0)->grad();
     AIDGE_ASSERT(fc_grad, "missing ouput #0 gradient");
@@ -63,7 +62,6 @@ void Aidge::FCImpl_cpu::backward()
     AIDGE_ASSERT(op_.getInput(1)->grad(), "missing input #1 gradient");
 
     const auto impl = Registrar<FCImpl_cpu>::create(getBestMatch(getRequiredSpec()));
-    Log::notice("\t Call to Aidge::FCIMpl_cpu::backward() - Found impl in registrar");
 
     // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -74,9 +72,13 @@ void Aidge::FCImpl_cpu::backward()
     const auto& input1grad = op_.getInput(1)->grad()->refCastFrom(input1gradFallback, *(op_.getOutput(0)));
     const auto& input2grad = (op_.getInput(2)) ? op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0))) : Tensor();
 
+    Log::info(" ");
+    Log::info("(FCImpl.cpp) Gradient of output 0 : {}", fc_grad->toString());
+    Log::info("(FCImpl.cpp) Gradient of input  0 : {}", input0grad.toString());
+    Log::info("(FCImpl.cpp) Gradient of input  1 : {}", input1grad.toString());
+
     // Call kernel
     const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
-    Log::notice("\t Call to Aidge::FCIMpl_cpu::backward() - Retrieved grad pointers");
     impl.backward(batchSize,
         input1grad.dims()[1], // nb input features
         input1grad.dims()[0], // nb output features
@@ -87,5 +89,8 @@ void Aidge::FCImpl_cpu::backward()
         input1grad.getImpl()->rawPtr(),
         (op_.getInput(2)) ? input2grad.getImpl()->rawPtr() : nullptr);
 
-    Log::notice("\t Call to Aidge::FCIMpl_cpu::backward() - Done");
+    Log::info("(FCImpl.cpp) Gradient of output 0 : {}", fc_grad->toString());
+    Log::info("(FCImpl.cpp) Gradient of input  0 : {}", input0grad.toString());
+    Log::info("(FCImpl.cpp) Gradient of input  1 : {}", input1grad.toString());
 }
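
The shape bookkeeping around the kernel call encodes the layout the kernel expects. A minimal sketch of that mapping, mirroring the call above (illustrative names, not the Aidge API): the weight gradient is laid out [outFeatures, inFeatures], and a 1-D input gradient is treated as batch size 1.

#include <cstddef>
#include <vector>

struct FcBackwardShapes { std::size_t batch, inFeatures, outFeatures; };

FcBackwardShapes resolveShapes(const std::vector<std::size_t>& inputGradDims,
                               const std::vector<std::size_t>& weightGradDims) {
    return { (inputGradDims.size() > 1) ? inputGradDims[0] : 1,  // batchSize fallback
             weightGradDims[1],    // nb input features
             weightGradDims[0] };  // nb output features
}
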
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
index b8bdd161af25a87937b066fc2264ecf1a9816da0..b09165633fa28dd5bbaf7080e0f18cc6ee63b966 100644
--- a/src/operator/MulImpl.cpp
+++ b/src/operator/MulImpl.cpp
@@ -41,7 +41,6 @@ void Aidge::MulImpl_cpu::forward() {
 
 template <>
 void Aidge::MulImpl_cpu::backward() {
-    Log::info("MulImpl_cpu::backward()");
     const Mul_Op& op_ = dynamic_cast<const Mul_Op&>(mOp);
 
     auto in0 = op_.getInput(0);
@@ -59,17 +58,15 @@ void Aidge::MulImpl_cpu::backward() {
                /* grad0Length  */ out0grad->size(),
                /* input0Dims   */ in0->dims(),
                /* input1Dims   */ in1->dims(),
-               out0grad->dims(),
-               getCPUPtr(in0),
-               getCPUPtr(in1),
-               getCPUPtr(out0grad),
+               /* outputDims   */ out0grad->dims(),
+               /* input0_      */ getCPUPtr(in0),
+               /* input1_      */ getCPUPtr(in1),
+               /* grad_output_ */ getCPUPtr(out0grad),
                getCPUPtr(in0grad),
                getCPUPtr(in1grad));
 
-    Log::info("MulImpl_cpu::backward() : Gradient of output 0");
-    out0grad->print();
-    Log::info("MulImpl_cpu::backward() : Gradient of input 0");
-    in0grad->print();
-    Log::info("MulImpl_cpu::backward() : Gradient of input 1");
-    in1grad->print();
+    Log::info(" ");
+    Log::info("(MulImpl_cpu::backward()) After Mul Backward pass.");
+    Log::info("(MulImpl_cpu::backward()) Gradient of input 0  : {}", in0grad->toString());
+    Log::info("(MulImpl_cpu::backward()) Gradient of input 1  : {}", in1grad->toString());
 }
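
For the record, the rule behind the logged gradients is the product rule: each input's gradient is the output gradient scaled by the other input. Minimal same-shape sketch; the real kernel receives input0Dims, input1Dims and outputDims precisely so it can also reduce over broadcast dimensions, which this sketch omits:

#include <cstddef>

void mulBackwardSketch(const float* in0, const float* in1, const float* gradOut,
                       float* gradIn0, float* gradIn1, std::size_t length) {
    for (std::size_t i = 0; i < length; ++i) {
        gradIn0[i] += gradOut[i] * in1[i];   // d(a*b)/da = b
        gradIn1[i] += gradOut[i] * in0[i];   // d(a*b)/db = a
    }
}
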
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 3601675206e5a86cf9c35316b5dd3daa102c59bd..8dc32f1638c936e368e58341f352ad9072293e9f 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -43,10 +43,10 @@ template <>
 void Aidge::SubImpl_cpu::backward() {
 
     const Sub_Op& op_ = dynamic_cast<const Sub_Op&>(mOp);
-    //Log::info("SubImpl_cpu::backward() : Node {}", op_.name());
 
     auto in0 = op_.getInput(0);
     auto in1 = op_.getInput(1);
+    auto out0 = op_.getOutput(0);
     auto in0grad = op_.getInput(0)->grad();
     auto in1grad = op_.getInput(1)->grad();
     auto out0grad = op_.getOutput(0)->grad();
@@ -55,9 +55,11 @@ void Aidge::SubImpl_cpu::backward() {
     const auto impl = Registrar<SubImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
 
-    in0grad->print();
-    in1grad->print();
-    out0grad->print();
+    Log::info(" ");
+    Log::info("(SubImpl.cpp) Gradient of input  0 : {}", in0grad->toString());
+    Log::info("(SubImpl.cpp) Gradient of input  1 : {}", in1grad->toString());
+    Log::info("(SubImpl.cpp) Gradient of output 0 : {}", out0grad->toString());
+
 
     // Call kernel
     impl.backward(/* input0Length */ in0grad->size(),
@@ -71,10 +73,7 @@ void Aidge::SubImpl_cpu::backward() {
                   /* gradInput1   */ getCPUPtr(in1grad));
 
 
-    Log::info("SubImpl_cpu::backward() : Gradient of output 0");
-    out0grad->print();
-    Log::info("SubImpl_cpu::backward() : Gradient of input 0");
-    in0grad->print();
-    Log::info("SubImpl_cpu::backward() : Gradient of input 1");
-    in1grad->print();
+    Log::info("(SubImpl.cpp) After backward.");
+    Log::info("(SubImpl.cpp) Gradient of input 0  : {}", in0grad->toString());
+    Log::info("(SubImpl.cpp) Gradient of input 1  : {}", in1grad->toString());
 }
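
Same-shape sketch of the Sub backward these logs bracket: the gradient passes through with sign +1 to the minuend and -1 to the subtrahend (out = in0 - in1). Illustrative names, broadcasting omitted:

#include <cstddef>

void subBackwardSketch(const float* gradOut, float* gradIn0, float* gradIn1,
                       std::size_t length) {
    for (std::size_t i = 0; i < length; ++i) {
        gradIn0[i] += gradOut[i];   // d(a-b)/da = +1
        gradIn1[i] -= gradOut[i];   // d(a-b)/db = -1
    }
}
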
diff --git a/unit_tests/operator/Test_Memorize.cpp b/unit_tests/operator/Test_Memorize.cpp
index 8f6b30420aa84393280c08422c00b7162fa15da8..c3e1576ab258edbbec68e0aa63f844ed5a7ba801 100644
--- a/unit_tests/operator/Test_Memorize.cpp
+++ b/unit_tests/operator/Test_Memorize.cpp
@@ -25,7 +25,6 @@
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/Memorize.hpp"
-#include "aidge/operator/PerMemorize.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
@@ -169,86 +168,4 @@ TEST_CASE("[cpu/operator] Memorize(backward)", "[Memorize][CPU]") {
     }
 }
 
-TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic]") {
-    auto mul = Mul();
-
-    //auto mem = Memorize(/*endStep=*/3);
-    //auto mem = Identity();
-    auto mem = PerMemorize(2);
-
-    auto add = Add();
-    auto pop = Pop();
-    auto stack = Stack(3);
-
-    // Initialization tensor for Memorize
-    auto input = Producer(std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f})), "input");
-    auto decay = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.9f, 0.9f})), "decay");
-    auto init = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})), "init");
-    auto back = std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f}));
-
-    auto initTensor = std::make_shared<Tensor>(Array2D<float,3,2>({
-        {
-            {1,1},
-            {1,1},
-            {1,1},
-        }
-    }));
-
-    std::static_pointer_cast<OperatorTensor>(pop->getOperator())->setInput(0, initTensor);
-
-    auto memOp = std::static_pointer_cast<OperatorTensor>(mem->getOperator());
-    memOp->setOutput(1, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
-    memOp->setOutput(0, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
-
-    //init->addChild(mem,0,0);
-    mem->addChild(mul,1,0);
-    decay->addChild(mul,0,1);
-    mul->addChild(add,0,1);
-    pop->addChild(add,0,0);
-    add->addChild(mem,0,0);
-    mem->addChild(stack,1,0);
-
-    auto graphView = getConnectedGraphView(mem);
-    graphView->compile();
-
-    Log::info("GraphView output nodes : {}", graphView->outputNodes().size());
-    for(auto node : graphView->outputNodes())
-    {
-       Log::info("output node type : {}", node->type());
-    }
-    // TODO: Set ordered outputs for this node.
-
-    auto scheduler = SequentialScheduler(graphView);
-    scheduler.forward();
-
-    //std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
-    std::static_pointer_cast<OperatorTensor>(stack->getOperator())->getOutput(0)->print();
-    REQUIRE(true);
-}
-
-TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic2]") {
-
-
-    auto input = Producer(std::make_shared<Tensor>(1.0f));
-    auto init = Producer(std::make_shared<Tensor>(1.0f));
-    auto add = Add();
-
-    //auto mem = PerMemorize(3);
-    auto mem = Memorize(3);
-
-    input->addChild(add,0,0);
-    add->addChild(mem,0,0);
-    mem->addChild(add, 1,1);
-    init->addChild(mem, 0, 1);
-
-    auto gv = getConnectedGraphView(mem);
-    gv->compile();
-
-    auto scheduler = SequentialScheduler(gv);
-    scheduler.forward();
-
-    std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
-
-    REQUIRE(true);
-}
 } // namespace Aidge
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 4a4dc22dafe6cbb5a880a985df3c7690a67e77a9..3a3930fb793f6f3f10f038fbf4368a8f9bddc4e4 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -708,7 +708,8 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         auto fc2 = FC(outChannels, inChannels, true, "fc2");
         // NOTE: Account for init step by adding 1 to the max timestep
         // parameter.
-        auto lif1 = Leaky(nbTimeSteps + 1, beta, threshold, LeakyReset::Subtraction, "leaky");
+        // The init-step adjustment described in the NOTE above is no longer applied.
+        auto lif1 = Leaky(nbTimeSteps, beta, threshold, LeakyReset::Subtraction, "leaky");
 
         // associateInput() does not work
         fc1->input(1).first->getOperator()->setOutput(0, myWeights);
@@ -858,7 +859,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
     constexpr auto inChannels = 4;
     constexpr auto outChannels = 4;
 
-    constexpr auto beta = 1.0;
+    constexpr auto beta = 0.8;
     constexpr auto threshold = 1.0;
     constexpr auto nbTimeSteps = 2;
 
@@ -913,10 +914,24 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
     graph->compile("cpu", DataType::Float32);
     graph->forwardDims();
 
+    Log::info("Lif 1 op tensors : \
+            \n Input 0  : {} \
+            \n Input 1  : {} \
+            \n Output 0 : {} \
+            \n Output 1 : {}",
+            static_cast<void*>(lif1Op->getInput(0).get()),
+            static_cast<void*>(lif1Op->getInput(1).get()),
+            static_cast<void*>(lif1Op->getOutput(0).get()),
+            static_cast<void*>(lif1Op->getOutput(1).get())
+        );
+
     auto scheduler = SequentialScheduler(graph);
     REQUIRE_NOTHROW(scheduler.generateScheduling());
     REQUIRE_NOTHROW(scheduler.forward(true));
 
+    // Modify graph here -- could also be checked by type
+    auto mGraph = graph->getNode("leaky");
+
     // Print output
     //Log::notice("FC1 op output 0");
     //fc1Op->getOutput(0)->print();
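
Context for the beta change above (1.0 -> 0.8): beta is the membrane decay of the Leaky operator, so beta = 1.0 made the neuron a pure integrator with no leak. Below is a minimal per-timestep sketch of the subtraction-reset update under the usual LIF convention (decay, integrate, spike, subtract threshold); it is an illustration, not the exact graph the meta-operator unrolls:

#include <cstddef>
#include <vector>

void leakyStepSketch(std::vector<float>& u,          // membrane potential per channel
                     const std::vector<float>& in,   // weighted input at this timestep
                     std::vector<float>& spikes,     // spike output at this timestep
                     float beta, float threshold) {
    for (std::size_t c = 0; c < u.size(); ++c) {
        u[c] = beta * u[c] + in[c];                   // leak, then integrate
        spikes[c] = (u[c] > threshold) ? 1.0f : 0.0f; // fire on threshold crossing
        u[c] -= spikes[c] * threshold;                // subtraction reset
    }
}
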