diff --git a/aidge_backend_cpu/unit_tests/test_scheduler.py b/aidge_backend_cpu/unit_tests/test_scheduler.py
index bc76620390b7563f0088f4c600b612bbe827b170..d8cf3e164da4bd34273905b0b0e156cf057635a5 100644
--- a/aidge_backend_cpu/unit_tests/test_scheduler.py
+++ b/aidge_backend_cpu/unit_tests/test_scheduler.py
@@ -36,7 +36,54 @@ class test_scheduler(unittest.TestCase):
         for i in range(len(expected_out)):
             self.assertEqual(expected_out[i], out_tensor[i])
 
+    def test_sequential_scheduling(self):
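+        # an empty placeholder tensor is enough here: generating the schedule
+        # should only depend on the graph topology, not on actual input data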
+        input_data = np.array([]).astype(np.float32)
+        input_tensor = aidge_core.Tensor(input_data)
 
+        input_node = aidge_core.Producer(input_tensor, "X")
+
+        graph_view = aidge_core.sequential([
+            aidge_core.FC(50, name='0'),
+            aidge_core.FC(50, name='1'),
+            aidge_core.FC(10, name='2'),
+        ])
+        EXPECTED_SCHEDULE = ['0', '1', '2']
+
+        input_node.add_child(graph_view)
+        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
+        input_node.get_operator().set_backend("cpu")
+        graph_view.set_datatype(aidge_core.DataType.Float32)
+        graph_view.set_backend("cpu")
+
+        scheduler = aidge_core.SequentialScheduler(graph_view)
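+        # generate_scheduling() is expected to build the static node ordering without executing the graph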
+        scheduler.generate_scheduling()
+
+        self.assertListEqual([i.name() for i in scheduler.get_static_scheduling()], EXPECTED_SCHEDULE)
+
+
+    def test_parallel_scheduling(self):
+        input_data = np.array([]).astype(np.float32)
+        input_tensor = aidge_core.Tensor(input_data)
+
+        input_node = aidge_core.Producer(input_tensor, "X")
+        graph_view = aidge_core.sequential([
+            aidge_core.FC(50, name='0'),
+            aidge_core.parallel([aidge_core.FC(50, name='1'), aidge_core.FC(50, name='3')]),
+            aidge_core.Add(name='2'),
+        ])
+
+        EXPECTED_SCHEDULE = [['0', '1', '3', '2'], ['0', '3', '1', '2']] # both schedules are valid: the parallel FC nodes '1' and '3' may run in either order
+
+        input_node.add_child(graph_view)
+        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
+        input_node.get_operator().set_backend("cpu")
+        graph_view.set_datatype(aidge_core.DataType.Float32)
+        graph_view.set_backend("cpu")
+
+        scheduler = aidge_core.SequentialScheduler(graph_view)
+        scheduler.generate_scheduling()
+
+        self.assertIn([i.name() for i in scheduler.get_static_scheduling()], EXPECTED_SCHEDULE)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/include/aidge/operator/AddImpl.hpp b/include/aidge/operator/AddImpl.hpp
index 4df4f18a04c5aa473a0f8bc988aedeb5cba3826d..6e1cd03a3af81ee85f4f9e0e212af7c02089734e 100644
--- a/include/aidge/operator/AddImpl.hpp
+++ b/include/aidge/operator/AddImpl.hpp
@@ -97,6 +97,7 @@ class AddImpl_cpu : public OperatorImpl {
         assert(outputIdx < mNbProducedData.size());
         return mNbProducedData[outputIdx];
     }
+    void updateConsummerProducer() override final;
 
     void forward() {
         // nothing
@@ -130,6 +131,7 @@ class AddImpl_cpu<1> : public OperatorImpl {
     NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
 
     NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -161,6 +163,7 @@ class AddImpl_cpu<2> : public OperatorImpl {
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
 
     NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -191,6 +194,7 @@ class AddImpl_cpu<3> : public OperatorImpl {
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
 
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -204,4 +208,4 @@ static Registrar<Add_Op<3>> registrarAddImpl3I_cpu("cpu", Aidge::AddImpl_cpu<3>:
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_H_ */
diff --git a/include/aidge/operator/AvgPoolingImpl.hpp b/include/aidge/operator/AvgPoolingImpl.hpp
index 429d428587ac3c893480c6661d311f0bfa7c785b..8373cb84a550efd8741a2dbc04c1e94ad37fe611 100644
--- a/include/aidge/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/operator/AvgPoolingImpl.hpp
@@ -54,6 +54,7 @@ class AvgPoolingImpl2D_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -66,4 +67,4 @@ static Registrar<AvgPooling_Op<2>> registrarAvgPoolingImpl2D_cpu("cpu", Aidge::A
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_H_ */
diff --git a/include/aidge/operator/BatchNormImpl.hpp b/include/aidge/operator/BatchNormImpl.hpp
index 567a1580a7726b40a07f2a65a90bed5750b0e0f2..d9f25b4a8e38510f82fc5afe9ed4b656197a47d5 100644
--- a/include/aidge/operator/BatchNormImpl.hpp
+++ b/include/aidge/operator/BatchNormImpl.hpp
@@ -69,6 +69,7 @@ class BatchNormImpl2D_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -81,4 +82,4 @@ static Registrar<BatchNorm_Op<2>> registrarBatchNormImpl2D_cpu("cpu", Aidge::Bat
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_H_ */
diff --git a/include/aidge/operator/ConvDepthWiseImpl.hpp b/include/aidge/operator/ConvDepthWiseImpl.hpp
index e54025dbd0b72225d33ec660eb8373ee9014da89..0d21c676d797b2fc4e95c4aea47674c8fca5eef4 100644
--- a/include/aidge/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/operator/ConvDepthWiseImpl.hpp
@@ -56,6 +56,7 @@ class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -68,4 +69,4 @@ static Registrar<ConvDepthWise_Op<2>> registrarConvDepthWiseImpl2D_cpu("cpu", Ai
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_H_ */
diff --git a/include/aidge/operator/ConvImpl.hpp b/include/aidge/operator/ConvImpl.hpp
index d6216cde05d74a76dc325540ce4d82745ffe2b2a..1f3dffe43b966bc37887f267cc56760a899476f9 100644
--- a/include/aidge/operator/ConvImpl.hpp
+++ b/include/aidge/operator/ConvImpl.hpp
@@ -56,6 +56,7 @@ class ConvImpl2D_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -68,4 +69,4 @@ static Registrar<Conv_Op<2>> registrarConvImpl2D_cpu("cpu", Aidge::ConvImpl2D_cp
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_CONVIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_CONVIMPL_H_ */
diff --git a/include/aidge/operator/FCImpl.hpp b/include/aidge/operator/FCImpl.hpp
index 9c577216a75b0c97180f7a6fc19b70b0cfa34e09..c69cc0b08a58877108c78d6f12c29e9089c2f665 100644
--- a/include/aidge/operator/FCImpl.hpp
+++ b/include/aidge/operator/FCImpl.hpp
@@ -50,7 +50,7 @@ class FCImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
+    void updateConsummerProducer() override final;
     void forward();
 
     void backward();
@@ -61,4 +61,4 @@ static Registrar<FC_Op> registrarFCImpl_cpu("cpu", Aidge::FCImpl_cpu::create);
 }
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_FCIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_FCIMPL_H_ */
diff --git a/include/aidge/operator/LeakyReLUImpl.hpp b/include/aidge/operator/LeakyReLUImpl.hpp
index b817306e2bc27329207572c15e8cdcb8f3ccce78..abe167bea16de01f861beb9701f747d39f265d9d 100644
--- a/include/aidge/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/operator/LeakyReLUImpl.hpp
@@ -49,7 +49,7 @@ class LeakyReLUImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
+    void updateConsummerProducer() override final;
     void forward();
 
     void backward();
@@ -60,4 +60,4 @@ static Registrar<LeakyReLU_Op> registrarLeakyReLUImpl_cpu("cpu", Aidge::LeakyReL
 }
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ */
diff --git a/include/aidge/operator/ProducerImpl.hpp b/include/aidge/operator/ProducerImpl.hpp
index ee3d3d725151d2d6f4972815cb50250bd8d03817..032172dbf0995fc62ce631aa5eba1cabf2374ad3 100644
--- a/include/aidge/operator/ProducerImpl.hpp
+++ b/include/aidge/operator/ProducerImpl.hpp
@@ -37,6 +37,7 @@ class ProducerImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+    void updateConsummerProducer() override final;
 
     void forward();
 
@@ -48,4 +49,4 @@ static Registrar<Producer_Op> registrarProducer1DImpl_cpu("cpu", Aidge::Producer
 }  // namespace
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ */
diff --git a/include/aidge/operator/ReLUImpl.hpp b/include/aidge/operator/ReLUImpl.hpp
index b28da884b99fae0aeba5805bf4bdd15de60b8ffc..537bdeeaf89b388a82e819330649c2ae3445c590 100644
--- a/include/aidge/operator/ReLUImpl.hpp
+++ b/include/aidge/operator/ReLUImpl.hpp
@@ -49,7 +49,7 @@ class ReLUImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
+    void updateConsummerProducer() override final;
     void forward();
 
     void backward();
@@ -60,4 +60,4 @@ static Registrar<ReLU_Op> registrarReLUImpl_cpu("cpu", Aidge::ReLUImpl_cpu::crea
 }
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_H_ */
diff --git a/include/aidge/operator/SoftmaxImpl.hpp b/include/aidge/operator/SoftmaxImpl.hpp
index 7641a355d44bd3e9eddec9ba27ac8876696f02b2..08567ab98e55233f1f578e82cb39ac5681f0a839 100644
--- a/include/aidge/operator/SoftmaxImpl.hpp
+++ b/include/aidge/operator/SoftmaxImpl.hpp
@@ -49,7 +49,7 @@ class SoftmaxImpl_cpu : public OperatorImpl {
     NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
+    void updateConsummerProducer() override final;
     void forward();
 
     void backward();
@@ -60,4 +60,4 @@ static Registrar<Softmax_Op> registrarSoftmaxImpl_cpu("cpu", Aidge::SoftmaxImpl_
 }
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_SOFTMAXIMPL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_SOFTMAXIMPL_H_ */
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index ece1967aaba42561108bbf14fbef02af71b509b9..96ef58fca97276d5267ba00bfb3edfe654f8f4f7 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -48,7 +48,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbConsumedData(Aidge::IOIndex_t /*inpu
 Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
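+// Advance the producer/consumer bookkeeping by one forward pass (logic moved out of forward())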
+void Aidge::AddImpl_cpu<1>::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::AddImpl_cpu<1>::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -63,11 +69,6 @@ void Aidge::AddImpl_cpu<1>::forward() {
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::AddImpl_cpu<1>::backward() {
@@ -112,7 +113,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbConsumedData(Aidge::IOIndex_t inputI
 Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
+void Aidge::AddImpl_cpu<2>::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::AddImpl_cpu<2>::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -130,11 +137,6 @@ void Aidge::AddImpl_cpu<2>::forward() {
         mOp.mInputs[1]->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]+= getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::AddImpl_cpu<2>::backward() {
@@ -180,7 +182,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbProducedData(Aidge::IOIndex_t output
     assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
+void Aidge::AddImpl_cpu<3>::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::AddImpl_cpu<3>::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -201,13 +209,8 @@ void Aidge::AddImpl_cpu<3>::forward() {
         mOp.mInputs[2]->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::AddImpl_cpu<3>::backward() {
     printf("Not implemented yet.\n");
-}
\ No newline at end of file
+}
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index eebaa5ddebd7e2f392187cbbd0fb6afb1f3a1dbb..0f54ea52ced9519a0ddf1dbc31af5d10776e6fb8 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -54,7 +54,13 @@ Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t
     assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
-
+void Aidge::AvgPoolingImpl2D_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum amount for a forward pass
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::AvgPoolingImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -69,13 +75,6 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() {
                mOp.getInput(0)->getImpl()->rawPtr(),
                mOp.getOutput(0)->getImpl()->rawPtr());
 
-
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::AvgPoolingImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index c8783b36d42231d7792a80cd134de3f491ec8374..aff01853cda5f374dd48361cb8bfec07d977eefa 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -53,7 +53,14 @@ Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t o
     assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
+void Aidge::BatchNormImpl2D_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::BatchNormImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -79,12 +86,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
                mOp.getOutput(0)->getImpl()->rawPtr(),
                true);
 
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
 
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::BatchNormImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 75a35cff1a56c2ee619e00e70f4c8e5d93356d68..e49c68cfb94f6096d51f4db05d4224d727036102 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -55,7 +55,14 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbProducedData(Aidge::IOIndex
     assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
+void Aidge::ConvDepthWiseImpl2D_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -73,14 +80,6 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
     kernelFunc(mOp.getParams(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
                mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
-
-
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::ConvDepthWiseImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index e75fab1073f9da2bcd95ad7d0f6485164dc6be31..65779c62e8cd038fd8764bff7bfde1171ffe81d6 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -55,7 +55,14 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t output
     assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
+void Aidge::ConvImpl2D_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::ConvImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -72,15 +79,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
                mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
                mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
 
-    // FIXME: Dummy wait for some earlier scheduler tests
-    std::this_thread::sleep_for(std::chrono::milliseconds(mOp.get<ConvParam::OutChannels>()));
 
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::ConvImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 970ce690e08a06bf89d19f1c09ab429d3053293b..c485ba2cfbeb608923f17ecb31720e252f067bd4 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -70,6 +70,16 @@ Aidge::NbElts_t Aidge::FCImpl_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx)
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
 
+void Aidge::FCImpl_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
+
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
+
 void Aidge::FCImpl_cpu::forward()
 {
     // FIXME: uncomment the following code once memory handling will work
@@ -94,7 +104,7 @@ void Aidge::FCImpl_cpu::forward()
     //         mOp.mInputs[2]->getImpl()->rawPtr(),
     //         mOp.getOutput(0)->getImpl()->rawPtr());
     // }
-    // else 
+    // else
     kernelFunc(
         mOp.getParams(),
         mOp.getInput(0)->dims()[0],
@@ -103,19 +113,8 @@ void Aidge::FCImpl_cpu::forward()
         mOp.mInputs[1]->getImpl()->rawPtr(),
         mOp.mInputs[2]->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-    
-    
-
-    // FIXME: Dummy wait for some earlier scheduler tests
-    std::this_thread::sleep_for(std::chrono::milliseconds(mOp.get<FCParam::OutChannels>()));
 
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]
-            += getNbRequiredData(static_cast<std::size_t>(inputIdx)); // each input is consumed by the minimum
-                                              // amount for a forward pass
 
-    mNbProducedData[0] += getRequiredMemory(0, {});
 }
 
 void Aidge::FCImpl_cpu::backward()
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index 1e86de4ac2a033aa65603e1fdacc3321e9cf1e96..656ba5db074c9001b42dcabc8662ae196c83beb2 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -51,7 +51,11 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*i
 Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
+void Aidge::LeakyReLUImpl_cpu::updateConsummerProducer() {
+    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::LeakyReLUImpl_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -66,11 +70,6 @@ void Aidge::LeakyReLUImpl_cpu::forward() {
         std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-
-
-    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::LeakyReLUImpl_cpu::backward() {
diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp
index 6c1de523a01d8bd1cd118d64b5c5cefd0563789d..3ca6d7066aabf515c12f91316f6e4f0987a5fa5a 100644
--- a/src/operator/ProducerImpl.cpp
+++ b/src/operator/ProducerImpl.cpp
@@ -61,7 +61,8 @@ Aidge::DimSize_t Aidge::ProducerImpl_cpu::getNbProducedData(
 {
     return getRequiredMemory(0, {});
 }
-
+void Aidge::ProducerImpl_cpu::updateConsummerProducer() {
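+    // nothing to update: a Producer consumes no input data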
+}
 
 void Aidge::ProducerImpl_cpu::forward()
 {
@@ -71,4 +72,4 @@ void Aidge::ProducerImpl_cpu::forward()
 void Aidge::ProducerImpl_cpu::backward()
 {
     printf("Not implemented yet.\n");
-}
\ No newline at end of file
+}
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 61c119373315e7096cb6d711459b76f02b487188..222b1366a3b2c5ba487d3ecae5e745f059125659 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -51,7 +51,11 @@ Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputI
 Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
+void Aidge::ReLUImpl_cpu::updateConsummerProducer() {
+    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0]+= getRequiredMemory(0, {});
+}
 void Aidge::ReLUImpl_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -66,10 +70,6 @@ void Aidge::ReLUImpl_cpu::forward() {
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 
-
-    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
 }
 
 void Aidge::ReLUImpl_cpu::backward() {
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index 506730421d18da15383e277a8e5cb5bbb44375de..a28f3b45b4255a2c2513db71732b9e721bfe4cfd 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -51,7 +51,14 @@ Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inp
 Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
     return mNbProducedData[0];
 }
+void Aidge::SoftmaxImpl_cpu::updateConsummerProducer() {
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum amount for a forward pass
 
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
 void Aidge::SoftmaxImpl_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");