diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 359452712f94be078122266089cc1da89baf50d5..841d3aee1f2a1b922441baf4c72387ff46073105 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -72,6 +72,12 @@ void Aidge::FCImpl_cpu::backward()
     const auto& input1grad = op_.getInput(1)->grad()->refCastFrom(input1gradFallback, *(op_.getOutput(0)));
     const auto& input2grad = (op_.getInput(2)) ? op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0))) : Tensor();
 
+    Log::info(" ");
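+    // Debug trace: log the gradients before the backward kernel runs.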
+    Log::info("(FCImpl.cpp) Gradient of output 0 : {}", fc_grad->toString());
+    Log::info("(FCImpl.cpp) Gradient of input  0 : {}", input0grad.toString());
+    Log::info("(FCImpl.cpp) Gradient of input  1 : {}", input1grad.toString());
+
     // Call kernel
     const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
     impl.backward(batchSize,
@@ -83,4 +83,9 @@ void Aidge::FCImpl_cpu::backward()
         input0grad.getImpl()->rawPtr(),
         input1grad.getImpl()->rawPtr(),
         (op_.getInput(2)) ? input2grad.getImpl()->rawPtr() : nullptr);
+
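+    // Debug trace: log the gradients again after the backward kernel has run.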
+    Log::info("(FCImpl.cpp) Gradient of output 0 : {}", fc_grad->toString());
+    Log::info("(FCImpl.cpp) Gradient of input  0 : {}", input0grad.toString());
+    Log::info("(FCImpl.cpp) Gradient of input  1 : {}", input1grad.toString());
 }
diff --git a/unit_tests/operator/Test_Memorize.cpp b/unit_tests/operator/Test_Memorize.cpp
index 8f6b30420aa84393280c08422c00b7162fa15da8..c3e1576ab258edbbec68e0aa63f844ed5a7ba801 100644
--- a/unit_tests/operator/Test_Memorize.cpp
+++ b/unit_tests/operator/Test_Memorize.cpp
@@ -25,7 +25,6 @@
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/Memorize.hpp"
-#include "aidge/operator/PerMemorize.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
@@ -169,86 +168,4 @@ TEST_CASE("[cpu/operator] Memorize(backward)", "[Memorize][CPU]") {
     }
 }
 
-TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic]") {
-    auto mul = Mul();
-
-    //auto mem = Memorize(/*endStep=*/3);
-    //auto mem = Identity();
-    auto mem = PerMemorize(2);
-
-    auto add = Add();
-    auto pop = Pop();
-    auto stack = Stack(3);
-
-    // Initialization tensor for Memorize
-    auto input = Producer(std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f})), "input");
-    auto decay = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.9f, 0.9f})), "decay");
-    auto init = Producer(std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})), "init");
-    auto back = std::make_shared<Tensor>(Array1D<float,2>({1.0f, 1.0f}));
-
-    auto initTensor = std::make_shared<Tensor>(Array2D<float,3,2>({
-        {
-            {1,1},
-            {1,1},
-            {1,1},
-        }
-    }));
-
-    std::static_pointer_cast<OperatorTensor>(pop->getOperator())->setInput(0, initTensor);
-
-    auto memOp = std::static_pointer_cast<OperatorTensor>(mem->getOperator());
-    memOp->setOutput(1, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
-    memOp->setOutput(0, std::make_shared<Tensor>(Array1D<float,2>({0.0f, 0.0f})));
-
-    //init->addChild(mem,0,0);
-    mem->addChild(mul,1,0);
-    decay->addChild(mul,0,1);
-    mul->addChild(add,0,1);
-    pop->addChild(add,0,0);
-    add->addChild(mem,0,0);
-    mem->addChild(stack,1,0);
-
-    auto graphView = getConnectedGraphView(mem);
-    graphView->compile();
-
-    Log::info("GraphView output nodes : {}", graphView->outputNodes().size());
-    for(auto node : graphView->outputNodes())
-    {
-       Log::info("output node type : {}", node->type());
-    }
-    // TODO: Set ordered outputs for this node.
-
-    auto scheduler = SequentialScheduler(graphView);
-    scheduler.forward();
-
-    //std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
-    std::static_pointer_cast<OperatorTensor>(stack->getOperator())->getOutput(0)->print();
-    REQUIRE(true);
-}
-
-TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic2]") {
-
-
-    auto input = Producer(std::make_shared<Tensor>(1.0f));
-    auto init = Producer(std::make_shared<Tensor>(1.0f));
-    auto add = Add();
-
-    //auto mem = PerMemorize(3);
-    auto mem = Memorize(3);
-
-    input->addChild(add,0,0);
-    add->addChild(mem,0,0);
-    mem->addChild(add, 1,1);
-    init->addChild(mem, 0, 1);
-
-    auto gv = getConnectedGraphView(mem);
-    gv->compile();
-
-    auto scheduler = SequentialScheduler(gv);
-    scheduler.forward();
-
-    std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
-
-    REQUIRE(true);
-}
 } // namespace Aidge
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index c42ca19a5e1db4dd0a4c723ef53b9bb7a4d9d384..3a3930fb793f6f3f10f038fbf4368a8f9bddc4e4 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -708,7 +708,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         auto fc2 = FC(outChannels, inChannels, true, "fc2");
-        // NOTE: Account for init step by adding 1 to the max timestep
-        // parameter.
-        auto lif1 = Leaky(nbTimeSteps + 1, beta, threshold, LeakyReset::Subtraction, "leaky");
+        // NOTE: The init step is no longer added to the max timestep
+        // parameter.
+        auto lif1 = Leaky(nbTimeSteps, beta, threshold, LeakyReset::Subtraction, "leaky");
 
         // associateInput() does not work
         fc1->input(1).first->getOperator()->setOutput(0, myWeights);