diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 02237d1d5476cee7e6a7aaa3e8b448c54f53f3ab..e270a78b6a0f6ed036d7048bed47204a2228417b 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -166,7 +166,7 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
 std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
 
 std::shared_ptr<MetaOperator_Op> LeakyOp();
-std::shared_ptr<Node> Leaky(const std::string& name = "");
+std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float threshold, const float beta, const std::string& name = "");
 
 }  // namespace Aidge
 
diff --git a/src/operator/MetaOperatorDefs/Leaky.cpp b/src/operator/MetaOperatorDefs/Leaky.cpp
index 98927e19b31e6b51b6c7254286e92eb8064eb131..450f2d1c3deb4bfb2e545f59af12b52258c5c654 100644
--- a/src/operator/MetaOperatorDefs/Leaky.cpp
+++ b/src/operator/MetaOperatorDefs/Leaky.cpp
@@ -13,13 +13,9 @@ namespace Aidge {
 constexpr auto memorizeOpDataOutputRecIndex = 1;
 constexpr auto memorizeOpDataOutputIndex = 0;
 
-std::shared_ptr<Node> Leaky(const std::string &name) {
+std::shared_ptr<Node> Leaky(const int nbTimeSteps, const float threshold, const float beta, const std::string &name) {
 
     Log::warn("! Lots of parameters are hardcoded");
-    const auto softReset = true;
-    const auto beta = 0.9;
-    const auto thresholdValue = 1.0;
-    const auto seqLength = 2;
 
     auto microGraph = std::make_shared<GraphView>();
 
@@ -31,21 +27,18 @@ std::shared_ptr<Node> Leaky(const std::string &name) {
     auto subNode2 = Sub(!name.empty() ? name + "_threshold" : "");
     auto reset = Mul(!name.empty() ? name + "_reset" : "");
 
-    // auto betaTensor = std::make_shared<Tensor>(Array2D<float, 16, 32>{});
-    //  FIXME: Use beta instead of a fixed value here, and put real dimensions
-    auto betaTensor = std::make_shared<Tensor>(Array2D<float, 3, 2>{});
+    auto betaTensor = std::make_shared<Tensor>(beta);
     auto uthTensor =
-        std::make_shared<Tensor>(static_cast<float>(thresholdValue));
-    uniformFiller<float>(betaTensor, beta, beta);
-    uniformFiller<float>(uthTensor, thresholdValue, thresholdValue);
+        std::make_shared<Tensor>(static_cast<float>(threshold));
+    uniformFiller<float>(uthTensor, threshold, threshold);
 
     auto decayRate = Producer(betaTensor, "leaky_beta", true);
     auto uth = Producer(uthTensor, "leaky_uth", true);
 
     auto potentialMem =
-        Memorize(seqLength, (!name.empty()) ? name + "_potential" : "");
+        Memorize(nbTimeSteps, (!name.empty()) ? name + "_potential" : "");
     auto spikeMem =
-        Memorize(seqLength, (!name.empty()) ? name + "_spike" : "");
+        Memorize(nbTimeSteps, (!name.empty()) ? name + "_spike" : "");
 
     // U[t] = Input[T] + beta * U[T-1] - S[T-1] * U_th
     // with S[T] = | 1, if U[T] - U_th > 0
@@ -59,20 +52,16 @@ std::shared_ptr<Node> Leaky(const std::string &name) {
     mulNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/1);
     inputNode->addChild(/*otherNode=*/addNode, /*outId=*/0, /*otherInId=*/0);
 
-    // S[T-1] * Uth
+    // S[T-1] * U_th
     spikeMem->addChild(reset,
                        /*outId=*/memorizeOpDataOutputRecIndex,
                        /*otherInId=*/0);
+
+    // TODO: Handle hard/soft reset
     uth->addChild(reset, 0, 1);
-    if (softReset) {
-        uth->addChild(reset, 0, 1);
-    } else {
-        // addNode->addChild(reset, 0, 1);
-        AIDGE_THROW_OR_ABORT(std::runtime_error,
-                             "Hard reset not implemented yet.");
-    }
-
-    // Input[T] + beta * U[T-1] - S[T-1] * Uth
+
+
+    // Input[T] + beta * U[T-1] - S[T-1] * U_th
     addNode->addChild(subNode, 0, 0);
     reset->addChild(subNode, 0, 1);
 
@@ -103,6 +92,8 @@ std::shared_ptr<Node> Leaky(const std::string &name) {
     microGraph->setOrderedInputs(
         {{inputNode, 0}, {potentialMem, 1}, {spikeMem, 1}});
 
+    // NOTE: Outputs are NOT the memory nodes (as it is done in LSTM), to avoid producing data during init
+    // This way, we can plug a stack operator after our node, and get correct results
     microGraph->setOrderedOutputs({//{potentialMem, memorizeOpDataOutputIndex},
                                    //{spikeMem, memorizeOpDataOutputIndex}
                                    {addNode, 0},
@@ -113,8 +104,6 @@ std::shared_ptr<Node> Leaky(const std::string &name) {
                                /*forcedInputsCategory=*/{},
                                /*name*/ "leaky");
 
-    // addProducer(metaOp, 1, {1,2}, "memorizeInit1");
-    // addProducer(metaOp, 2, {1,2}, "memorizeInit2");
     return metaOp;
 }
 
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 97aea24145a8c9ef809ebdd6051532100891daff..042b04f01bdc1430c8b9f1b9df6951f12b821ed1 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -148,15 +148,15 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
     }
 
     SECTION("Leaky") {
-        auto myLeaky = Leaky();
+        auto myLeaky = Leaky(10, 1.0, 0.9);
         auto op = std::static_pointer_cast<OperatorTensor>(myLeaky->getOperator());
 
-        // 2 inputs :
-        // 1 for the actual input data,
-        // 1 for the Memorize init tensor.
         auto inputs = myLeaky->inputs();
 
+        // Two memorize nodes + real data input
         REQUIRE(myLeaky->nbInputs() == 3);
+        // Outputs for spike and memory + 2 Memorize nodes
+        REQUIRE(myLeaky->nbOutputs() == 4);
     	REQUIRE(true);
     }
 }