diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index 5c842c26e30bc9e20115e8bbc3c3f27a1f0d277b..e36abe2a9d68a2b56ab1777aa04b0e911df514c8 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -29,9 +29,6 @@ void Aidge::SubImpl_cpu::forward() {
 
     // Find the correct kernel type
     const auto impl = Registrar<SubImpl_cpu>::create(getBestMatch(getRequiredSpec()));
-    Log::info("Sub Operator Kernel");
-    op_.getInput(0)->print();
-    op_.getInput(1)->print();
 
     // Call kernel
     impl.forward(op_.getInput(0)->dims(),
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 1706438529b359620e6fc6be289245ab4e5cd077..cf663dbbe762626215fa34e2fe2133cb5536cb0d 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -10,16 +10,17 @@
  ********************************************************************************/
 
 #include <aidge/filler/Filler.hpp>
 #include <catch2/catch_test_macros.hpp>
 #include <cmath>
 #include <cstdlib>
 #include <memory>
 #include <random>
 
-#include "aidge/backend/cpu/operator/ConvImpl.hpp"
+#include "aidge/backend/cpu/operator/ConvImpl.hpp" 
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Conv.hpp"
+#include "aidge/operator/FC.hpp"
 #include "aidge/operator/Identity.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
 using namespace Aidge;
 
@@ -211,6 +212,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
             PaddedConv(3, 4, {3, 3}, "myPaddedConv", {1, 1}, {1, 1, 1, 1});
     }
     SECTION("LSTM(forward)") {
+
         auto pop = Pop();
         auto myLSTM = LSTM(32, 64, 0, true, "ltsm");
         auto op =
@@ -279,6 +281,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         REQUIRE(microGraphScheduler->getStaticScheduling(1).size() == 24);
         REQUIRE(microGraphScheduler->getStaticScheduling(15).size() == 24);
     }
+
     SECTION("LSTM(forward_values)") {
         auto myLSTM = LSTM(2, 3, 0, true, "ltsm");
         auto op =
@@ -348,6 +351,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
 
         REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
     }
+
     SECTION("LSTM(forward_values_seq)") {
         auto pop = Pop();
         auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
@@ -413,6 +417,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
 
         REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
     }
+
     SECTION("LSTM(forward_values_seq_flatten)(sequential)") {
         auto pop = Pop();
         auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
@@ -592,18 +597,111 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
     }
 
     SECTION("Leaky(forward)(fixed)") {
+
+        constexpr auto inChannels = 10;
+        constexpr auto outChannels = 5;
 
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(
-            Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}},
-                                     {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}});
-
-
-        constexpr auto beta = 0.9;
+        constexpr auto beta = 0.95;
         constexpr auto threshold  = 1.0;
-        auto pop = Pop("pop");
-        auto leaky = Leaky(2, beta, threshold, "leaky");
+        constexpr auto nbTimeSteps = 2;
+
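+        // fc1 weights, shape (outChannels, inChannels) = (5, 10).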
+        auto myWeights = std::make_shared<Tensor>(Array2D<float, outChannels, inChannels>{{
+            {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0},
+            {1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1},
+            {0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4},
+            {0.4, 0.3, 0.2, 0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5},
+            {0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0},
+        }});
+
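+        // fc2 weights, shape (10, 5): one row per fc2 output channel, one column per lif1 channel.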
+        auto myWeights2 = std::make_shared<Tensor>(Array2D<float, inChannels, outChannels>{{
+            {0.1, 0.2, 0.3, 0.4, 0.5},
+            {0.6, 0.7, 0.8, 0.9, 1.0},
+            {1.0, 0.9, 0.8, 0.7, 0.6},
+            {0.5, 0.4, 0.3, 0.2, 0.1},
+            {0.5, 0.6, 0.7, 0.8, 0.9},
+            {1.0, 0.1, 0.2, 0.3, 0.4},
+            {0.4, 0.3, 0.2, 0.1, 0.0},
+            {0.1, 0.2, 0.3, 0.4, 0.5},
+            {0.9, 0.8, 0.7, 0.6, 0.5},
+            {0.4, 0.3, 0.2, 0.1, 0.0},
+        }});
+
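+        // Batch of 2 samples with 10 features each; the same input is presented at every time step.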
+        auto myInput = std::make_shared<Tensor>(Array2D<float, 2, 10>{{
+            {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0},
+            {1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1},
+        }});
+
+        // Reference values computed with snnTorch (Python): output of fc1 at time step 1.
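+        // e.g. element [0][0]: 0.1*0.1 + 0.2*0.2 + ... + 1.0*1.0 = 3.85 (no bias contribution).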
+        auto expectedOutputlif1ts1 = std::make_shared<Tensor>(Array2D<float,2,5>{{
+            {3.850, 2.2000, 2.6500, 1.5000, 1.6500},
+            {2.200, 3.8500, 3.4000, 1.2500, 3.3000},
+        }});
+
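+        // Every lif1 potential is above the threshold, so the spike vector is all ones and each fc2
+        // output reduces to the row sum of myWeights2 (e.g. 0.1 + 0.2 + 0.3 + 0.4 + 0.5 = 1.5).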
+        auto expectedOutputfc2ts1 = std::make_shared<Tensor>(Array2D<float,2,10>{{
+            {1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
+            {1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
+        }});
+
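+        // Time step 2: the membrane decays, the same input current is added, and the threshold is
+        // subtracted for the spike emitted at time step 1, e.g. [0][0]: 0.95 * 3.85 + 3.85 - 1.0 = 6.5075.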
+        auto expectedOutputlif1ts2 = std::make_shared<Tensor>(Array2D<float,2,5>{{
+            {6.5075, 3.2900, 4.1675, 1.9250, 2.2175},
+            {3.2900, 6.5075, 5.6300, 1.4375, 5.4350},
+        }});
+
+        // NOTE: Same output as at time step 1: every channel's potential is still above the threshold,
+        // so the LIF neurons fire at every time step for every channel.
+        auto expectedOutputfc2ts2 = std::make_shared<Tensor>(Array2D<float,2,10>{{
+            {1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
+            {1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
+        }});
+
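+        // Zero tensor used for lif1's two state inputs (inputs 1 and 2).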
+        auto init = std::make_shared<Tensor>(Array2D<float, 2, 5>{});
+        uniformFiller<float>(init, 0.0, 0.0);
+
+        auto fc1 = FC(inChannels, outChannels, true, "fc1");
+        auto fc2 = FC(outChannels, inChannels, true, "fc2");
+        // NOTE: Account for init step by adding 1 to the max timestep parameter.
+        auto lif1 = Leaky(nbTimeSteps+1, beta, threshold, "leaky");
+
+        // associateInput() does not work for the weight inputs, so set the weight Producers' outputs directly.
+        fc1->input(1).first->getOperator()->setOutput(0, myWeights);
+        fc2->input(1).first->getOperator()->setOutput(0, myWeights2);
+
+        auto fc1Op = std::static_pointer_cast<OperatorTensor>(fc1->getOperator());
+        auto lif1Op = std::static_pointer_cast<MetaOperator_Op>(lif1->getOperator());
+        auto fc2Op = std::static_pointer_cast<OperatorTensor>(fc2->getOperator());
+
+        fc1Op->associateInput(0, myInput);
+        lif1Op->associateInput(1, init);
+        lif1Op->associateInput(2, init);
+
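+        // Wire the graph: fc1 -> lif1; lif1's output 1 (the spike output) feeds fc2, while
+        // output 0 is the one compared against the expected values below.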
+        fc1->addChild(lif1, 0, 0);
+        lif1->addChild(fc2, 1, 0);
+
+        auto g = std::make_shared<GraphView>();
+        g->add({fc1, lif1, fc2});
+        g->compile("cpu", DataType::Float32);
+        auto scheduler = SequentialScheduler(g);
 
-        REQUIRE(true);
+        // First forward pass (time step 1)
+        scheduler.forward(true);
+        REQUIRE(approxEq<float>(*(lif1Op->getOutput(0)), *(expectedOutputlif1ts1)));
+        REQUIRE(approxEq<float>(*(fc2Op->getOutput(0)), *(expectedOutputfc2ts1)));
+
+        // Second forward pass (time step 2)
+        scheduler.forward(true);
+        REQUIRE(approxEq<float>(*(lif1Op->getOutput(0)), *(expectedOutputlif1ts2)));
+        REQUIRE(approxEq<float>(*(fc2Op->getOutput(0)), *(expectedOutputfc2ts2)));
     }
 
     SECTION("Leaky(forward)") {