diff --git a/aidge_backend_cpu/unit_tests/test_scheduler.py b/aidge_backend_cpu/unit_tests/test_scheduler.py
index bc76620390b7563f0088f4c600b612bbe827b170..d8cf3e164da4bd34273905b0b0e156cf057635a5 100644
--- a/aidge_backend_cpu/unit_tests/test_scheduler.py
+++ b/aidge_backend_cpu/unit_tests/test_scheduler.py
@@ -36,7 +36,56 @@ class test_scheduler(unittest.TestCase):
         for i in range(len(expected_out)):
             self.assertEqual(expected_out[i], out_tensor[i])
 
+    def test_sequential_scheduling(self):
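+        """Schedule a purely sequential graph (FC -> FC -> FC): the static scheduling must follow the topological order."""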
+        input_data = np.array([]).astype(np.float32)
+        input_tensor = aidge_core.Tensor(input_data)
 
+        input_node = aidge_core.Producer(input_tensor, "X")
+
+        graph_view = aidge_core.sequential([
+            aidge_core.FC(50, name='0'),
+            aidge_core.FC(50, name='1'),
+            aidge_core.FC(10, name='2'),
+        ])
+        EXPECTED_SCHEDULE = ['0', '1', '2']
+
+        input_node.add_child(graph_view)
+        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
+        input_node.get_operator().set_backend("cpu")
+        graph_view.set_datatype(aidge_core.DataType.Float32)
+        graph_view.set_backend("cpu")
+
+        scheduler = aidge_core.SequentialScheduler(graph_view)
+        scheduler.generate_scheduling()
+
+        self.assertListEqual([i.name() for i in scheduler.get_static_scheduling()], EXPECTED_SCHEDULE)
+
+
+    def test_parallel_scheduling(self):
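+        """Schedule a graph with two parallel FC branches feeding an Add node: either branch order is a valid schedule."""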
+        input_data = np.array([]).astype(np.float32)
+        input_tensor = aidge_core.Tensor(input_data)
+
+        input_node = aidge_core.Producer(input_tensor, "X")
+        graph_view = aidge_core.sequential([
+            aidge_core.FC(50, name='0'),
+            aidge_core.parallel([aidge_core.FC(50, name='1'), aidge_core.FC(50, name='3')]),
+            aidge_core.Add(name='2'),
+        ])
+
+        EXPECTED_SCHEDULE = [['0', '1', '3', '2'], ['0', '3', '1', '2']]  # both schedules are valid
+
+        input_node.add_child(graph_view)
+        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
+        input_node.get_operator().set_backend("cpu")
+        graph_view.set_datatype(aidge_core.DataType.Float32)
+        graph_view.set_backend("cpu")
+
+        scheduler = aidge_core.SequentialScheduler(graph_view)
+        scheduler.generate_scheduling()
+
+        self.assertIn([i.name() for i in scheduler.get_static_scheduling()], EXPECTED_SCHEDULE)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 4df4f18a04c5aa473a0f8bc988aedeb5cba3826d..6e1cd03a3af81ee85f4f9e0e212af7c02089734e 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -97,6 +97,8 @@ class AddImpl_cpu : public OperatorImpl {
         assert(outputIdx < mNbProducedData.size());
         return mNbProducedData[outputIdx];
     }
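+    // Updates the consumed/produced data counters (bookkeeping previously done in forward()).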
+    void updateConsummerProducer() override final;
 
     void forward() {
         // nothing
@@ -130,6 +131,7 @@ class AddImpl_cpu<1> : public OperatorImpl {
     NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
 
     NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -161,6 +163,7 @@ class AddImpl_cpu<2> : public OperatorImpl {
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
 
     NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -191,6 +194,7 @@ class AddImpl_cpu<3> : public OperatorImpl {
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
 
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -204,4 +208,4 @@ static Registrar<Add_Op<3>> registrarAddImpl3I_cpu("cpu", Aidge::AddImpl_cpu<3>:
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index 429d428587ac3c893480c6661d311f0bfa7c785b..8373cb84a550efd8741a2dbc04c1e94ad37fe611 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -54,6 +54,7 @@ class AvgPoolingImpl2D_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -66,4 +67,4 @@ static Registrar<AvgPooling_Op<2>> registrarAvgPoolingImpl2D_cpu("cpu", Aidge::A
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index 567a1580a7726b40a07f2a65a90bed5750b0e0f2..d9f25b4a8e38510f82fc5afe9ed4b656197a47d5 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -69,6 +69,7 @@ class BatchNormImpl2D_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -81,4 +82,4 @@ static Registrar<BatchNorm_Op<2>> registrarBatchNormImpl2D_cpu("cpu", Aidge::Bat
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index e54025dbd0b72225d33ec660eb8373ee9014da89..0d21c676d797b2fc4e95c4aea47674c8fca5eef4 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -56,6 +56,7 @@ class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -68,4 +69,4 @@ static Registrar<ConvDepthWise_Op<2>> registrarConvDepthWiseImpl2D_cpu("cpu", Ai
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index d6216cde05d74a76dc325540ce4d82745ffe2b2a..1f3dffe43b966bc37887f267cc56760a899476f9 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -56,6 +56,7 @@ class ConvImpl2D_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -68,4 +69,4 @@ static Registrar<Conv_Op<2>> registrarConvImpl2D_cpu("cpu", Aidge::ConvImpl2D_cp
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_CONVIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_CONVIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index 9c577216a75b0c97180f7a6fc19b70b0cfa34e09..c69cc0b08a58877108c78d6f12c29e9089c2f665 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -50,7 +50,7 @@ class FCImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
+    void updateConsummerProducer() override final;
     void forward();
 
     void backward();
@@ -61,4 +61,4 @@ static Registrar<FC_Op> registrarFCImpl_cpu("cpu", Aidge::FCImpl_cpu::create);
 }
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_FCIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_FCIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index b817306e2bc27329207572c15e8cdcb8f3ccce78..abe167bea16de01f861beb9701f747d39f265d9d 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -49,7 +49,7 @@ class LeakyReLUImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
+    void updateConsummerProducer() override final;
     void forward();
 
     void backward();
@@ -60,4 +60,4 @@ static Registrar<LeakyReLU_Op> registrarLeakyReLUImpl_cpu("cpu", Aidge::LeakyReL
 }
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ProducerImpl.hpp b/include/aidge/backend/cpu/operator/ProducerImpl.hpp
index ee3d3d725151d2d6f4972815cb50250bd8d03817..032172dbf0995fc62ce631aa5eba1cabf2374ad3 100644
--- a/include/aidge/backend/cpu/operator/ProducerImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ProducerImpl.hpp
@@ -37,6 +37,7 @@ class ProducerImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -48,4 +49,4 @@ static Registrar<Producer_Op> registrarProducer1DImpl_cpu("cpu", Aidge::Producer
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index b28da884b99fae0aeba5805bf4bdd15de60b8ffc..537bdeeaf89b388a82e819330649c2ae3445c590 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -49,7 +49,7 @@ class ReLUImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
+    void updateConsummerProducer() override final;
     void forward();
 
     void backward();
@@ -60,4 +60,4 @@ static Registrar<ReLU_Op> registrarReLUImpl_cpu("cpu", Aidge::ReLUImpl_cpu::crea
 }
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index 7641a355d44bd3e9eddec9ba27ac8876696f02b2..08567ab98e55233f1f578e82cb39ac5681f0a839 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -49,7 +49,7 @@ class SoftmaxImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
+    void updateConsummerProducer() override final;
     void forward();
 
     void backward();
@@ -60,4 +60,4 @@ static Registrar<Softmax_Op> registrarSoftmaxImpl_cpu("cpu", Aidge::SoftmaxImpl_
 }
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_SOFTMAXIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_SOFTMAXIMPL_H_ */
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 63e2bb47f2f9aa23daec6a8f01f4a896361be58a..d3da42185237a59146af17199e34a00dbebd6d96 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -48,7 +48,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbConsumedData(Aidge::IOIndex_t /*inpu
 Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
+void Aidge::AddImpl_cpu<1>::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::AddImpl_cpu<1>::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -63,11 +69,6 @@ void Aidge::AddImpl_cpu<1>::forward() {
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::AddImpl_cpu<1>::backward() {
@@ -112,7 +113,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbConsumedData(Aidge::IOIndex_t inputI
 Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
+void Aidge::AddImpl_cpu<2>::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::AddImpl_cpu<2>::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -130,11 +137,6 @@ void Aidge::AddImpl_cpu<2>::forward() {
         mOp.mInputs[1]->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]+= getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::AddImpl_cpu<2>::backward() {
@@ -180,7 +182,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbProducedData(Aidge::IOIndex_t output
     assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
+void Aidge::AddImpl_cpu<3>::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::AddImpl_cpu<3>::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -201,13 +209,8 @@ void Aidge::AddImpl_cpu<3>::forward() {
         mOp.mInputs[2]->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::AddImpl_cpu<3>::backward() {
     printf("Not implemented yet.\n");
-}
\ No newline at end of file
+}
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index d8e7e591f4390d82eb6a34e0e6c5e0bc55178c00..6c434a5c38853a1dee66db5be95b6b1bfdde8162 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -54,7 +54,13 @@ Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t
     assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
-
+void Aidge::AvgPoolingImpl2D_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
+                                                                                           // amount for a forward pass
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::AvgPoolingImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -69,13 +75,6 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() {
                mOp.getInput(0)->getImpl()->rawPtr(),
                mOp.getOutput(0)->getImpl()->rawPtr());
 
-
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::AvgPoolingImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index dc63396ed13551d1720bec4c17e15dc09609ebea..a0d4d032ded9ede1b2dba307aa967af330167d25 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -53,7 +53,14 @@ Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t o
     assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
+void Aidge::BatchNormImpl2D_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
+                                                                   // amount for a forward pass
 
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::BatchNormImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -79,12 +86,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
                mOp.getOutput(0)->getImpl()->rawPtr(),
                true);
 
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
 
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::BatchNormImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 9d73662d48cb3dfd66abb236e2e70d166a869882..3e920cf68366b82bce8df29c8aea0c838e6a1364 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -55,7 +55,14 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbProducedData(Aidge::IOIndex
     assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
+void Aidge::ConvDepthWiseImpl2D_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
+                                                                   // amount for a forward pass
 
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -73,14 +80,6 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
     kernelFunc(mOp.getParams(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
                mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
-
-
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::ConvDepthWiseImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index b5724790f63503e72a94db5615bde29af2800ba3..b4ddf80929923a9c2c5998ac8614ebb0d3afe000 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -55,7 +55,14 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t output
     assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
+void Aidge::ConvImpl2D_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
+                                                                   // amount for a forward pass
 
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::ConvImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -72,15 +79,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
                mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
                mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
 
-    // FIXME: Dummy wait for some earlier scheduler tests
-    std::this_thread::sleep_for(std::chrono::milliseconds(mOp.get<ConvParam::OutChannels>()));
 
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::ConvImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 25c4955a5ceaa0753c00d44e8ff342826aa778c6..086902be0ab1c2027a8c62c143bc27921e5e9e1b 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -71,6 +71,16 @@ Aidge::NbElts_t Aidge::FCImpl_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx)
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
 
+void Aidge::FCImpl_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx]
+            += getNbRequiredData(static_cast<std::size_t>(inputIdx)); // each input is consumed by the minimum
+                                              // amount for a forward pass
+
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
+
 void Aidge::FCImpl_cpu::forward()
 {
     // FIXME: uncomment the following code once memory handling will work
@@ -95,7 +105,7 @@ void Aidge::FCImpl_cpu::forward()
     //         mOp.mInputs[2]->getImpl()->rawPtr(),
     //         mOp.getOutput(0)->getImpl()->rawPtr());
     // }
-    // else 
+    // else
     kernelFunc(
         mOp.getParams(),
         mOp.getInput(0)->dims()[0],
@@ -104,19 +114,8 @@ void Aidge::FCImpl_cpu::forward()
         mOp.mInputs[1]->getImpl()->rawPtr(),
         mOp.mInputs[2]->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-    
-    
-
-    // FIXME: Dummy wait for some earlier scheduler tests
-    std::this_thread::sleep_for(std::chrono::milliseconds(mOp.get<FCParam::OutChannels>()));
 
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]
-            += getNbRequiredData(static_cast<std::size_t>(inputIdx)); // each input is consumed by the minimum
-                                              // amount for a forward pass
 
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::FCImpl_cpu::backward()
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index 85234e5d3eb058959b851851bee88081fa8fd4bb..f6a44d381081c7c7f1dcbbf02d91212168cc07aa 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -50,7 +50,11 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*i
 Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
+void Aidge::LeakyReLUImpl_cpu::updateConsummerProducer() {
+    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::LeakyReLUImpl_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -65,11 +69,6 @@ void Aidge::LeakyReLUImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-
-
-    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::LeakyReLUImpl_cpu::backward() {
diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp
index 0135b7a0a4a677cb1066d6482bbc477fc6b954d3..664f3745414380fbaf5654ab035ba2ab957da87b 100644
--- a/src/operator/ProducerImpl.cpp
+++ b/src/operator/ProducerImpl.cpp
@@ -61,7 +61,9 @@ Aidge::DimSize_t Aidge::ProducerImpl_cpu::getNbProducedData(
 {
     return getRequiredMemory(0, {});
 }
-
+void Aidge::ProducerImpl_cpu::updateConsummerProducer() {
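+    // A Producer consumes no input and reports its produced data directly; there is nothing to update.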
+}
 
 void Aidge::ProducerImpl_cpu::forward()
 {
@@ -71,4 +72,4 @@ void Aidge::ProducerImpl_cpu::forward()
 void Aidge::ProducerImpl_cpu::backward()
 {
     printf("Not implemented yet.\n");
-}
\ No newline at end of file
+}
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 1fe231d44b9c856e19a926cc8b711e5a513395e9..cea50bc1e72cfa8e60cdd0f1839c03bcd568e052 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -50,7 +50,11 @@ Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputI
 Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
+void Aidge::ReLUImpl_cpu::updateConsummerProducer() {
+    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::ReLUImpl_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -65,10 +69,6 @@ void Aidge::ReLUImpl_cpu::forward() {
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 
-
-    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::ReLUImpl_cpu::backward() {
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index f564534e14156f1c084b311c930f280351c9de8f..03e8f9305617f6a7ced878470e3c94ba625f5b22 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -50,7 +50,14 @@ Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inp
 Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
+void Aidge::SoftmaxImpl_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
+                                                                   // amount for a forward pass
 
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::SoftmaxImpl_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");