diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index 8d896c8ec9eb92dd87689d84cad5fc09bf03c4f1..a4579e2951ccbafc4335ae428c62eba94c0757e5 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -95,60 +95,60 @@ build:ubuntu_python:
     paths:
       - venv/
 
-# build:windows_cpp:
-#   stage: build
-#   needs: []
-#   tags:
-#     - windows
-
-#   image: buildtools
-#   before_script:
-#     # Install Chocolatey
-#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-#     # Install dependencies
-#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-#     - choco install git -Y
-#     - choco install python -Y
-#     # Update PATH
-#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-#   script:
-#     - mkdir -p build_cpp
-#     - mkdir -p install_cpp
-#     - cd build_cpp
-#     - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
-#     - cmake --build . -j2
-#     - cmake --install . --config Debug
-
-#   artifacts:
-#     expire_in: 1 week
-#     paths:
-#       - build_cpp/
-#       - install_cpp/
-
-# build:windows_python:
-#   stage: build
-#   needs: []
-#   tags:
-#     - windows
-
-#   image: buildtools
-#   before_script:
-#     # Install Chocolatey
-#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-#     # Install dependencies
-#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-#     - choco install git -Y
-#     - choco install python -Y
-#     # Update PATH
-#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-#   script:
-#     - python -m pip install virtualenv
-#     - virtualenv venv
-#     - venv\Scripts\Activate.ps1
-#     # Numpy dependancy for unit test
-#     - python -m pip install -r requirements.txt
-#     - python -m pip install .
-#   artifacts:
-#     expire_in: 1 week
-#     paths:
-#       - venv/
+build:windows_cpp:
+  stage: build
+  needs: []
+  tags:
+    - windows
+
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install git -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    - mkdir -p build_cpp
+    - mkdir -p install_cpp
+    - cd build_cpp
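+    # Note: with a multi-config generator (the Visual Studio default), CMAKE_BUILD_TYPE is
+    # ignored; the configuration is selected at build/install time via --config.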
+    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
+    - cmake --build . -j2
+    - cmake --install . --config Debug
+
+  artifacts:
+    expire_in: 1 week
+    paths:
+      - build_cpp/
+      - install_cpp/
+
+build:windows_python:
+  stage: build
+  needs: []
+  tags:
+    - windows
+
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install git -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    - python -m pip install virtualenv
+    - virtualenv venv
+    - venv\Scripts\Activate.ps1
+    # NumPy dependency for the unit tests
+    - python -m pip install -r requirements.txt
+    - python -m pip install .
+  artifacts:
+    expire_in: 1 week
+    paths:
+      - venv/
diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
index abe526cdf3fac882177509cade20e5ed58ed7f77..81e6ca9ac5b868287aa0ef27040c0ead785d3639 100644
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ b/.gitlab/ci/test.gitlab-ci.yml
@@ -26,23 +26,23 @@ test:ubuntu_python:
     reports:
       junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
 
-# test:windows_cpp:
-#   stage: test
-#   needs: ["build:windows_cpp"]
-#   tags:
-#     - windows
-#   image: buildtools
-#   before_script:
-#     # Install Chocolatey
-#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-#     # Install dependencies
-#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-#     - choco install python -Y
-#     # Update PATH
-#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-#   script:
-#     - cd build_cpp
-#     - ctest --output-junit ctest-results.xml --output-on-failure
-#   artifacts:
-#     reports:
-#       junit: build_cpp/ctest-results.xml
+test:windows_cpp:
+  stage: test
+  needs: ["build:windows_cpp"]
+  tags:
+    - windows
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    - cd build_cpp
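+    # ctest --output-junit requires CMake >= 3.21.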
+    - ctest --output-junit ctest-results.xml --output-on-failure
+  artifacts:
+    reports:
+      junit: build_cpp/ctest-results.xml
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a0d70035e0e150ec33dc4806bd02632debbf0a42..d26a511d2c778804eef1df17b6620bc858cfa0c3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -30,8 +30,17 @@ endif()
 
 ##############################################
 # Find system dependencies
+include(FetchContent)
 
+FetchContent_Declare(
+    fmt
+    GIT_REPOSITORY https://github.com/fmtlib/fmt.git
+    GIT_TAG        10.2.1 # or a later release
+)
 
+set(FMT_SYSTEM_HEADERS ON)
+FetchContent_MakeAvailable(fmt)
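+# fmt is linked into the aidge_core shared library, so it must be built as position-independent code.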
+set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
 
 ##############################################
 # Create target and set properties
@@ -64,6 +73,7 @@ if (PYBIND)
         )
 endif()
 
+target_link_libraries(${module_name} PUBLIC fmt::fmt)
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
 if (DOSANITIZE STREQUAL "ON")
diff --git a/aidge_core-config.cmake.in b/aidge_core-config.cmake.in
index adfbf2838bdbba48c7c2e8420fece43054cd39d3..9862b640541458bdab1b1b8bc2a90297625e35ee 100644
--- a/aidge_core-config.cmake.in
+++ b/aidge_core-config.cmake.in
@@ -1,5 +1,8 @@
 @PACKAGE_INIT@
 
+include(CMakeFindDependencyMacro)
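+# fmt is a PUBLIC link dependency of aidge_core: consumers of this package must find it too.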
+find_dependency(fmt)
+
 include(${CMAKE_CURRENT_LIST_DIR}/aidge_core-config-version.cmake)
 
 include(${CMAKE_CURRENT_LIST_DIR}/aidge_core-targets.cmake)
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index fb7ed0587fb074858e9f3766d5de0d43b39d1ef5..c541ae0e03459a0a7200795bc2d3c6b70c13be3b 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -120,7 +120,7 @@ class test_operator_binding(unittest.TestCase):
         generic_op = generic_node.get_operator()
         customImpl = PythonCustomImpl(generic_op)
 
-        generic_op.forward() # Do nothing, no implementation set
+        #generic_op.forward() # Would throw an error: no implementation set yet
         generic_op.set_impl(customImpl)
         generic_op.forward() # Increment idx
         self.assertEqual(customImpl.idx, 1)
diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipes.py
similarity index 98%
rename from aidge_core/unit_tests/test_recipies.py
rename to aidge_core/unit_tests/test_recipes.py
index cc571d8e5db1beae7fbdb0047c8ae7ced3339fc9..240bcd9501aa1fd64985fa59c87f01dfdf9343aa 100644
--- a/aidge_core/unit_tests/test_recipies.py
+++ b/aidge_core/unit_tests/test_recipes.py
@@ -11,7 +11,7 @@ SPDX-License-Identifier: EPL-2.0
 import unittest
 import aidge_core
 
-class test_recipies(unittest.TestCase):
+class test_recipes(unittest.TestCase):
     """
     """
     def setUp(self):
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 84c5404491223746af89e0bb6f8a7c9a40017133..10adfc6f3ba91f413a312598e26a0b6047ecd359 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -66,7 +66,7 @@
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/stimuli/Stimulus.hpp"
 
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 19f0837504016f38ae96dd852bc6fa41b5ab53ba..8b5aba10dbc2691b5d607cda28eba621335881d1 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -63,6 +63,12 @@ public:
      */
     virtual void updateConsummerProducer();
 
+    /**
+     * @brief Reset the Consumer-Producer system.
+     */
+    virtual void resetConsummerProducer();
+
     virtual ~OperatorImpl() = default;
 
 protected:
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 12551e71cd646564321eeb23f64bf68d77a8886c..509c11691047604fbce959cfb29649aac75b5a1e 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -167,7 +167,7 @@ public:
     */
     virtual void setRawPtr(void* /*ptr*/, NbElts_t /*length*/)
     {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Cannot set raw pointer for backend %s", mBackend);
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Cannot set raw pointer for backend {}", mBackend);
     };
 
     /**
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 78efc4a29f5aef4395b556d23d99da7609ff762c..549232b2635f48b979208bb2f91b845dacef6f8b 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -41,7 +41,7 @@ public:
 
         std::size_t i = 0;
         for (; i < mNbElts &&
-               *(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
+               *static_cast<const T*>(rawPtr(i)) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
                ++i) {
         }
         return i == mNbElts;
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index bf34860fbc4e4d6cfef8528d20de40c3e31a292b..d8412dbd4ddb4ec371649d180bce10a80dd624f3 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -47,14 +47,14 @@ enum class DataType {
 
 class Data {
 public:
-    constexpr Data(const char* type): mType(type) {};
-    constexpr const char* type() const {
+    Data(const std::string& type): mType(type) {};
+    constexpr const std::string& type() const {
         return mType;
     }
     virtual ~Data() = default;
 
 private:
-    const char* mType;
+    const std::string mType;
 };
 }
 
@@ -80,4 +80,8 @@ const char* const EnumStrings<Aidge::DataType>::data[]
        "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
 }
 
+namespace Aidge {
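+// fmt resolves format_as() via argument-dependent lookup, so a DataType can be passed
+// directly to fmt::format("{}", ...).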
+inline auto format_as(DataType dt) { return EnumStrings<Aidge::DataType>::data[static_cast<int>(dt)]; }
+}
+
 #endif /* AIDGE_DATA_H_ */
\ No newline at end of file
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 3ccd55d3f19b3cff70c1a100d980ae63213261c5..95101bb3ad1704f4acb8dd3e46ef7ee450f1f91f 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -452,7 +452,7 @@ class Tensor : public Data,
 
     std::string toString() const;
 
-    inline void print() const { printf("%s\n", toString().c_str()); }
+    inline void print() const { fmt::print("{}\n", toString()); }
 
     std::shared_ptr<Tensor> grad() {
         if (!mGrad) {
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 392fb59e65b8b844a091aaa89e7d623986dda85b..3311797d858cf4899a6cfed7a18fb9840afb514e 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -96,8 +96,13 @@ public:
      * specified location.
      * @param path
      */
-    void save(std::string path, bool verbose = false, bool showProducers = true) const;
+    void save(const std::string& path, bool verbose = false, bool showProducers = true) const;
 
+    /**
+     * Check whether a node is in the current GraphView.
+     * @param nodePtr Node to check
+     * @return bool True if nodePtr belongs to the GraphView.
+    */
     inline bool inView(NodePtr nodePtr) const {
         return mNodes.find(nodePtr) != mNodes.end();
     }
@@ -106,6 +111,8 @@ public:
         return mRootNode;
     }
 
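+    /**
+     * Set the root node of the GraphView (used in particular as the starting
+     * point for node ranking, see getRankedNodes()).
+     * @param node New root node.
+    */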
+    void setRootNode(NodePtr node);
+
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
 ///////////////////////////////////////////////////////
@@ -114,7 +121,9 @@ public:
     inline std::set<NodePtr> inputNodes() const noexcept {
         std::set<NodePtr> nodes;
         for (auto node : mInputNodes) {
-            nodes.insert(node.first);
+            if (node.first != nullptr) {
+                nodes.insert(node.first);
+            }
         }
         return nodes;
     }
@@ -122,7 +131,9 @@ public:
     inline std::set<NodePtr> outputNodes() const noexcept {
         std::set<NodePtr> nodes;
         for (auto node : mOutputNodes) {
-            nodes.insert(node.first);
+            if (node.first != nullptr) {
+                nodes.insert(node.first);
+            }
         }
         return nodes;
     }
@@ -140,8 +151,8 @@ public:
     void setOrderedInputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& inputs);
     void setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs);
 
-    inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedInputs() { return mInputNodes; };
-    inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedOutputs() { return mOutputNodes; };
+    inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedInputs() const { return mInputNodes; };
+    inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedOutputs() const { return mOutputNodes; };
 
     /**
      * @brief List outside data input connections of the GraphView.
@@ -172,7 +183,7 @@ public:
      * @brief List all input connections (within and outside) of the specified GraphView node named "name".
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
-    std::vector<std::pair<NodePtr, IOIndex_t>> inputs(std::string name) const;
+    std::vector<std::pair<NodePtr, IOIndex_t>> inputs(const std::string& name) const;
 
     /**
      * @brief List outside output connections of the GraphView. The vector
@@ -188,7 +199,7 @@ public:
      * @return std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>
      */
     std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs(
-            std::string nodeName) const;
+            const std::string& nodeName) const;
 
     /**
      * @brief Assert Datatype, Backend, data format and dimensions along the GraphView are coherent.
@@ -262,6 +273,34 @@ public:
      */
     NodePtr getNode(const std::string& nodeName) const;
 
+    /**
+     * Get the ranked list of nodes in the GraphView.
+     * Node ranking is performed as follows:
+     * - The root node is put in the ranked list first (rank 1);
+     * - Then, its children (in order of outputs) are added to the ranked list;
+     * - Then, its parents (in order of inputs) are added to the ranked list;
+     * - The children and parents of the next node in the ranked list are then
+     *   added to the list, and so on.
+     * - Any remaining nodes have no path to the root node and are added in
+     *   arbitrary order. In this case, the ranking is not guaranteed to be unique.
+     *
+     * If the ranking cannot be guaranteed to be unique, the second item indicates
+     * the rank from which uniqueness can no longer be guaranteed.
+     * @return std::pair<std::vector<NodePtr>, size_t> Pair with the list of ranked
+     * nodes and the size of the ranked sub-list for which uniqueness is guaranteed.
+    */
+    std::pair<std::vector<NodePtr>, size_t> getRankedNodes() const;
+
+    /**
+     * Get the name of each node according to the GraphView node ranking.
+     * @param format The formatting string to be used with fmt::format().
+     * The usable positional arguments are the following:
+     * {0} node name, {1} node type, {2} rank, {3} type rank
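+     * For example, the format "{0} ({1}#{3})" could yield names such as "conv1 (Conv#0)".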
+     * @param markNonUnicity If true, non-unique ranks are prefixed with "?"
+     * @return std::map<NodePtr, std::string> A map with the corresponding names
+    */
+    std::map<NodePtr, std::string> getRankedNodesName(const std::string& format, bool markNonUnicity = true) const;
+
     /**
      * @brief Remove a Node from the current GraphView scope without affecting its connections.
      * @param nodePtr Node to remove
@@ -375,7 +414,7 @@ public:
      */
     bool swap(Node &node, Node &otherNode);
 
-    void link(std::string name1_inID, std::string name2_outID);
+    void link(const std::string& name1_inID, const std::string& name2_outID);
 
     /**
      * @brief Insert a node (newParentNode) as a parent of the passed node (childNode).
@@ -411,6 +450,7 @@ public:
      * @return true replacement has been performed
      * @return false no replacement has been performed
      */
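+    // Overload taking whole GraphViews: replaces the nodes of oldG with the nodes of newG.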
+    static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
     static bool replace(const std::set<NodePtr>& oldNodes, const std::set<NodePtr>& newNodes);
 
     /**
@@ -486,6 +526,13 @@ private:
 
     void _forwardDims(std::set<NodePtr> listNodes);
 };
+
+/**
+ * Create a GraphView containing all nodes with a path to the given node.
+ * @param node Initial node to construct the graph.
+ * @return GraphView GraphView containing all nodes with a path to node.
+*/
+std::shared_ptr<GraphView> getConnectedGraphView(std::shared_ptr<Node> node);
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_GRAPH_GRAPHVIEW_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index a2098ff36b40b78eb12a36fe28793e8dd73d9d9c..5066cb78f86bfc87d33fce4ecd8f302c40cb14d2 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -75,7 +75,7 @@ public:
     void computeOutputDims() override final {
         // check inputs have been associated
         if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
         }
         if (!(getInput(0)->empty())) {
             std::array<DimSize_t, DIM + 2> outputDims;
@@ -111,7 +111,7 @@ public:
 
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
                 if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
                 }
             }
 
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 06cc468bd7266bbcfeb6802f274c536ec09867fc..62a9540105d77866167d87b9733ed473e03f0151 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -84,17 +84,23 @@ public:
         const auto firstInputNbDims = getInput(0) -> nbDims();
         for (IOIndex_t i = 1; i < nbInputs(); ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
-            associated &= (getInput(i)->nbDims() == firstInputNbDims);
-            for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
-                if (dim == getAttr<ConcatAttr::Axis>()) {
-                    outputDims[dim] += getInput(i)->dims()[dim];
-                }
-                else {
-                    associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
+
+            if (getInput(i)->nbDims() == firstInputNbDims) {
+                for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
+                    if (dim == getAttr<ConcatAttr::Axis>()) {
+                        outputDims[dim] += getInput(i)->dims()[dim];
+                    }
+                    else {
+                        associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
+                    }
                 }
             }
+            else {
+                associated = false;
+                break;
+            }
         }
         if (associated) {
             getOutput(0)->resize(outputDims);
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index be5fb3e393ced7ee7a53e27426b4247e48b478e8..8290fb3d0d978e9af3291809c5057406424096d5 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -94,7 +94,7 @@ public:
         bool associated = true;
         for (IOIndex_t i = 0; i < 3; ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             associated &= !(getInput(i)->empty());
         }
@@ -133,7 +133,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co
 
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
                 if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
                 }
             }
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 9d0c0bf408a2f634f96881cd339c330340d5e344..a3b537ba60d03209e078dc94348f001603d2f3f5 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -85,7 +85,7 @@ public:
         bool associated = true;
         for (IOIndex_t i = 0; i < 3; ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             associated &= !(getInput(i)->empty());
         }
@@ -128,7 +128,7 @@ public:
 
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
                 if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
                 }
             }
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index a73734ad20e10fe2a3e1d0d12d40e584b4540fb4..f6d81b5781dd25c990f496fa9f592502c9705eba 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -70,11 +70,12 @@ public:
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
-        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
-        if (inputIdx == 2) {
-            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0));
-            assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
-        }
+        assert(data->type() == Tensor::Type && "input data must be of Tensor type");
+        // TODO: FIXME: check this, because data dims may not be initialized at this point...
+        //if (inputIdx == 2) {
+        //    assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0));
+        //    assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
+        //}
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
         if (inputIdx == 0 && getInput(0)->nbDims() == 1)
             mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
@@ -84,7 +85,7 @@ public:
         bool associated = true;
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             associated &= !(getInput(i)->empty());
         }
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 624af6e755d882ca9585ac2e4175f9c3977e4058..c315e671c2f084af869e3b21107066137496366b 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -38,7 +38,9 @@ private:
 public:
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
         : OperatorTensor(type, nbData, nbParam, nbOut)
-    {}
+    {
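+        // Attach a default OperatorImpl so scheduling queries work out of the box;
+        // forward() still throws until a custom implementation is set (see setImpl()).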
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -46,7 +48,9 @@ public:
      */
     GenericOperator_Op(const GenericOperator_Op& op)
         : OperatorTensor(op)
-    {}
+    {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -58,6 +62,7 @@ public:
 
     // Helper functions that can be used with setComputeOutputDims():
     static const ComputeDimsFunc Identity;
+    static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs);
 
     inline void setComputeOutputDims(ComputeDimsFunc func) {
         mComputeOutputDims = func;
@@ -97,22 +102,8 @@ public:
 
     ~GenericOperator_Op() = default;
 
-    void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { printf("setBackend: not available yet.\n"); }
-    void setDataType(const DataType& /*datatype*/) const override { printf("setDataType: not available yet.\n"); }
-    void forward() override final {
-        if(mImpl){
-            mImpl->forward();
-        }else{
-            printf("forward: No implementation is linked.\n");
-        }
-    }
-    void backward() override final {
-        if(mImpl){
-            mImpl->backward();
-        }else{
-            printf("backward: No implementation is linked.\n");
-        }
-    }
+    void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); }
+    void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); }
 };
 
 /**
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 57cd20311a4e4c98966af0af98b9fe4533155ea6..c2e6eaff77971c3dcf350a02bc5089d08b5c8488 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -78,29 +78,19 @@ public:
     void backward() override final { }
 
     void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override final {
-        if (strcmp(data->type(), "Tensor") != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as outputs", type().c_str());
-        }
-        if (outputIdx >= nbInputs()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
-        }
+        AIDGE_ASSERT(data->type() == "Tensor", "{} Operator only accepts Tensors as outputs", type());
+        AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs());
         *mInputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
     }
 
     void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override final {
-        if (strcmp(data->type(), "Tensor") != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
-        }
-        if (outputIdx >= nbInputs()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
-        }
+        AIDGE_ASSERT(data->type() == "Tensor", "{} Operator only accepts Tensors as inputs", type());
+        AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs());
         *mInputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
     }
 
     const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const override final {
-        if (outputIdx >= nbInputs()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbInputs());
-        }
+        AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs());
         if (mInputs[outputIdx] == nullptr){
             return mOutputs[outputIdx]; // Input is not initialized with empty tensor
         }
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 467a69d73c98a21c85e956acf42536e197833cbd..b07fa38a41c664c4fcbf90227914264ec68390a0 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -78,7 +78,7 @@ public:
 
     void computeOutputDims() override final {
         if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
         }
         if (!(getInput(0)->empty())) {
             std::array<DimSize_t, DIM + 2> outputDims{};
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..8991ccb44eb4926f375ff102858f4683e1bea4d8
--- /dev/null
+++ b/include/aidge/operator/Memorize.hpp
@@ -0,0 +1,101 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_MEMORIZE_H_
+#define AIDGE_CORE_OPERATOR_MEMORIZE_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
+
+namespace Aidge {
+enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
+
+class Memorize_Op : public OperatorTensor,
+    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)>,
+    public StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int> {
+public:
+    static const std::string Type;
+
+    using Attributes_ = StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int>;
+    template <MemorizeAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    Memorize_Op(const unsigned int endStep)
+        : OperatorTensor(Type, 1, 1, 2),
+          Attributes_(attr<MemorizeAttr::ScheduleStep>(0),
+                      attr<MemorizeAttr::ForwardStep>(0),
+                      attr<MemorizeAttr::EndStep>(endStep))
+    {
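+        // Output #1 (data_output_rec) aliases output #0 (data_output): both share the same Tensor.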
+        mOutputs[1] = mOutputs[0];
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Memorize_Op(const Memorize_Op& op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        mImpl = op.mImpl ? Registrar<Memorize_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mOutputs[1] = mOutputs[0];
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Memorize_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Memorize_Op>(*this);
+    }
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
+        mImpl = Registrar<Memorize_Op>::create({name})(*this);
+        mOutputs[0]->setBackend(name, device);
+    }
+
+    void computeOutputDims() override;
+    bool outputDimsForwarded() const override;
+    void updateConsummerProducer() override;
+    void forward() override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "data_input_init"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output", "data_output_rec"};
+    }
+};
+
+inline std::shared_ptr<Node> Memorize(const unsigned int endStep, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
+    "ScheduleStep",
+    "ForwardStep",
+    "EndStep"
+};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 5955d860a2e9a0db9bb296552927c40eb411f30d..7f36eca2c4586f61f72e0d842d2d576450cd1596 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -25,6 +25,7 @@ public:
     // Micro-graph handling:
     std::shared_ptr<GraphView> mGraph; // Meta operator micro-graph
     std::shared_ptr<SequentialScheduler> mScheduler;
+    std::weak_ptr<Node> mUpperNode;
 
    public:
     MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph);
@@ -38,6 +39,13 @@ public:
           mGraph(op.mGraph->clone())
     {}
 
+    /**
+     * Set the node that should be used for scheduling.
+    */
+    void setUpperNode(std::shared_ptr<Node> node) {
+        mUpperNode = node;
+    }
+
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MetaOperator_Op
@@ -55,7 +63,8 @@ public:
     }
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
-        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+        AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
+        AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bounds for MetaOperator", inputIdx);
 
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
         inputOp.first->getOperator()->associateInput(inputOp.second, data);
@@ -65,8 +74,17 @@ public:
     }
 
     void computeOutputDims() override final {
-        // Forward dims of micro-graph
-        mGraph->forwardDims();
+        // Check first that all required inputs are available, otherwise
+        // mGraph->forwardDims() will fail!
+        bool forwarded = true;
+        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+        }
+
+        if (forwarded) {
+            // Forward dims of micro-graph
+            mGraph->forwardDims();
+        }
     }
 
 
@@ -90,6 +108,8 @@ public:
     }
 
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override;
+    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override;
     NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override;
     NbElts_t getNbProducedData(IOIndex_t outputIdx) const override;
 
@@ -107,7 +127,10 @@ inline std::shared_ptr<Node> MetaOperator(const char *type,
                                   const std::shared_ptr<GraphView>& graph,
                                   const std::string& name = "")
 {
-    return std::make_shared<Node>(std::make_shared<MetaOperator_Op>(type, graph), name);
+    auto op = std::make_shared<MetaOperator_Op>(type, graph);
+    auto node = std::make_shared<Node>(op, name);
+    op->setUpperNode(node);
+    return node;
 }
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 2832f9fce005e0ae9d2bab98bf764c68f93e3cda..8f1de7c0e92558a4b47962c3a375764e1bd1c2ee 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -18,6 +18,14 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/Identity.hpp"
+#include "aidge/operator/Concat.hpp"
+#include "aidge/operator/Tanh.hpp"
+#include "aidge/operator/Sigmoid.hpp"
 
 namespace Aidge {
 template <std::array<DimSize_t, 1>::size_type DIM>
@@ -135,6 +143,116 @@ inline std::shared_ptr<Node> PaddedMaxPooling(
 {
     return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
 }
+
+inline std::shared_ptr<Node> LSTM(DimSize_t in_channels,
+                                  DimSize_t hidden_channels,
+                                  DimSize_t seq_length,
+                                  bool noBias = false,
+                                  const std::string& name = "")
+{
+    // Construct micro-graph
+    auto input = Identity((!name.empty()) ? name + "_input" : "");
+    auto hiddenState = Memorize(seq_length, (!name.empty()) ? name + "_hidden_state" : "");
+    auto cellState = Memorize(seq_length, (!name.empty()) ? name + "_cell_state" : "");
+    auto add = Add(2, (!name.empty()) ? name + "_add" : "");
+
+    // Forget gate
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_forgetGateX" : "");
+    input->addChild(forgetGateX, 0, 0);
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_forgetGateH" : "");
+    hiddenState->addChild(forgetGateH, 1, 0);
+    auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
+    forgetGateX->addChild(forgetGate, 0, 0);
+    forgetGateH->addChild(forgetGate, 0, 1);
+    auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : "");
+    auto forgetGateMul = Mul((!name.empty()) ? name + "_forgetGateMul" : "");
+    forgetGate->addChild(forgetGateAct, 0, 0);
+    forgetGateAct->addChild(forgetGateMul, 0, 0);
+    forgetGateMul->addChild(add, 0, 0);
+    cellState->addChild(forgetGateMul, 1, 1);
+
+    // Input gate
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_inputGateX" : "");
+    input->addChild(inputGateX, 0, 0);
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_inputGateH" : "");
+    hiddenState->addChild(inputGateH, 1, 0);
+    auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
+    inputGateX->addChild(inputGate, 0, 0);
+    inputGateH->addChild(inputGate, 0, 1);
+    auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : "");
+    auto inputGateMul = Mul((!name.empty()) ? name + "_inputGateMul" : "");
+    inputGate->addChild(inputGateAct, 0, 0);
+    inputGateAct->addChild(inputGateMul, 0, 0);
+    inputGateMul->addChild(add, 0, 1);
+
+    // Candidate for cell update
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_cellCandidateX" : "");
+    input->addChild(cellCandidateX, 0, 0);
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_cellCandidateH" : "");
+    hiddenState->addChild(cellCandidateH, 1, 0);
+    auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
+    cellCandidateX->addChild(cellCandidate, 0, 0);
+    cellCandidateH->addChild(cellCandidate, 0, 1);
+    auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : "");
+    cellCandidate->addChild(cellCandidateAct, 0, 0);
+    cellCandidateAct->addChild(inputGateMul, 0, 1);
+
+    // Output gate
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_outputGateX" : "");
+    input->addChild(outputGateX, 0, 0);
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_outputGateH" : "");
+    hiddenState->addChild(outputGateH, 1, 0);
+    auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
+    outputGateX->addChild(outputGate, 0, 0);
+    outputGateH->addChild(outputGate, 0, 1);
+    auto outputGateAct = Sigmoid((!name.empty()) ? name + "_outputGateAct" : "");
+    auto outputGateMul = Mul((!name.empty()) ? name + "_outputGateMul" : "");
+    outputGate->addChild(outputGateAct, 0, 0);
+    outputGateAct->addChild(outputGateMul, 0, 0);
+
+    // Updated cell state to help determine new hidden state
+    auto cellUpdatedAct = Tanh((!name.empty()) ? name + "_cellUpdatedAct" : "");
+    add->addChild(cellUpdatedAct, 0, 0);
+    cellUpdatedAct->addChild(outputGateMul, 0, 1);
+    outputGateMul->addChild(hiddenState, 0, 0);
+    add->addChild(cellState, 0, 0);
+
+    std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
+    microGraph->add(input);
+    microGraph->add({hiddenState, cellState, add,
+        forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
+        inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
+        cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
+        outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
+        cellUpdatedAct}, false);
+
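+    // Ordered inputs: data, the four input weights and four recurrent weights (FC input #1),
+    // the four input biases and four recurrent biases (FC input #2), and finally the
+    // initial hidden and cell states.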
+    microGraph->setOrderedInputs({{input, 0},
+        {inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
+        {inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
+        {inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
+        {inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
+        {hiddenState, 1}, {cellState, 1}});
+    microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
+
+    auto metaOp = MetaOperator("LSTM", microGraph, name);
+    addProducer(metaOp, 1, {hidden_channels, in_channels}, "wi");
+    addProducer(metaOp, 2, {hidden_channels, in_channels}, "wo");
+    addProducer(metaOp, 3, {hidden_channels, in_channels}, "wf");
+    addProducer(metaOp, 4, {hidden_channels, in_channels}, "wc");
+    addProducer(metaOp, 5, {hidden_channels, hidden_channels}, "ri");
+    addProducer(metaOp, 6, {hidden_channels, hidden_channels}, "ro");
+    addProducer(metaOp, 7, {hidden_channels, hidden_channels}, "rf");
+    addProducer(metaOp, 8, {hidden_channels, hidden_channels}, "rc");
+    addProducer(metaOp, 9, {(noBias ? 0 : hidden_channels)}, "wbi");
+    addProducer(metaOp, 10, {(noBias ? 0 : hidden_channels)}, "wbo");
+    addProducer(metaOp, 11, {(noBias ? 0 : hidden_channels)}, "wbf");
+    addProducer(metaOp, 12, {(noBias ? 0 : hidden_channels)}, "wbc");
+    addProducer(metaOp, 13, {(noBias ? 0 : hidden_channels)}, "rbi");
+    addProducer(metaOp, 14, {(noBias ? 0 : hidden_channels)}, "rbo");
+    addProducer(metaOp, 15, {(noBias ? 0 : hidden_channels)}, "rbf");
+    addProducer(metaOp, 16, {(noBias ? 0 : hidden_channels)}, "rbc");
+    return metaOp;
+}
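+
+// Usage sketch: build a 10-step LSTM with 32 input and 64 hidden channels
+// (weights and biases are added as Producers by the function itself):
+//   auto lstm = LSTM(/*in_channels=*/32, /*hidden_channels=*/64, /*seq_length=*/10);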
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 808450030bdfc176c9cbc435c76b4932586397b8..a0d2292b7860baa60fe537698784d4d250c81f42 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -98,10 +98,10 @@ public:
     virtual void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
 
-    std::shared_ptr<Hook> getHook(std::string hookName) {
+    std::shared_ptr<Hook> getHook(const std::string& hookName) {
         return mHooks[hookName];
     }
-    void addHook(std::string hookName) {
+    void addHook(const std::string& hookName) {
         mHooks.insert(std::pair<std::string, std::shared_ptr<Hook>>(hookName,Registrar<Hook>::create({hookName})(shared_from_this())));
     }
 
@@ -121,14 +121,22 @@ public:
     inline void setImpl(std::shared_ptr<OperatorImpl> impl) { mImpl = impl; }
 
     /**
-     * @brief Minimum amount of data from a specific input for one computation pass.
+     * @brief Minimum amount of data from a specific input required by the
+     * implementation to be run.
+     *
      * @param inputIdx Index of the input analysed.
      * @return NbElts_t
      */
     virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
+    // Amount of input data that cannot be overwritten during the execution.
+    virtual NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
+
+    // Memory required at an output for a given input size.
+    virtual NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
+
     /**
-     * @brief Amount of data from a specific input actually used in one computation pass.
+     * @brief Total amount of consumed data from a specific input.
      *
      * @param inputIdx Index of the input analysed.
      * @return NbElts_t
@@ -136,7 +144,7 @@ public:
     virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
 
     /**
-     * @brief Amount of data ready to be used on a specific output.
+     * @brief Total amount of produced data ready to be used on a specific output.
      *
      * @param outputIdx Index of the output analysed.
      * @return NbElts_t
@@ -145,6 +153,8 @@ public:
 
     virtual void updateConsummerProducer();
 
+    virtual void resetConsummerProducer();
+
     virtual void forward();
 
     virtual void backward();
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 56245dd2dfd62d4dc765de6e3d43b08c144cc62b..bb961295bfaad2999af01460c49833085ff50a92 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -78,7 +78,7 @@ public:
         bool associated = true;
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
             if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
             }
             associated &= !(getInput(i)->empty());
         }
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cb4ba871a55b9dfd1c835c05949c3c18966b7f5a
--- /dev/null
+++ b/include/aidge/operator/Pop.hpp
@@ -0,0 +1,95 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_POP_H_
+#define AIDGE_CORE_OPERATOR_POP_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
+
+namespace Aidge {
+enum class PopAttr { ForwardStep };
+
+class Pop_Op : public OperatorTensor,
+    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)>,
+    public StaticAttributes<PopAttr, unsigned int> {
+public:
+    static const std::string Type;
+
+    using Attributes_ = StaticAttributes<PopAttr, unsigned int>;
+    template <PopAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    Pop_Op()
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(attr<PopAttr::ForwardStep>(0))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Pop_Op(const Pop_Op& op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        mImpl = op.mImpl ? Registrar<Pop_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Pop_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Pop_Op>(*this);
+    }
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
+        mImpl = Registrar<Pop_Op>::create({name})(*this);
+        mOutputs[0]->setBackend(name, device);
+    }
+
+    void computeOutputDims() override final;
+    void updateConsummerProducer() override;
+    void forward() override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Pop(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::PopAttr>::data[] = {
+    "ForwardStep"
+};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_POP_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index fe9b044e2309eb7e724d6648b84c044d7407bafb..0731498dd3e06541ed82a86a98c2ae0bb355f413 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -45,6 +45,7 @@ public:
         Attributes_(attr<ProdAttr::Constant>(constant))
     {
         mOutputs[0]->resize(dims);
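+        // A Producer has nothing to compute: the default OperatorImpl is sufficient.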
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
     Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false)
@@ -52,6 +53,7 @@ public:
         Attributes_(attr<ProdAttr::Constant>(constant))
     {
         mOutputs[0] = tensor; // copy the pointer of the Tensor
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
     /**
@@ -65,7 +67,9 @@ public:
         for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
             mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
         }
-        mImpl = op.mImpl ? Registrar<Producer_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
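+        // Use the registered backend implementation when available; otherwise fall
+        // back to the default OperatorImpl.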
+        mImpl = (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}))
+            ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this)
+            : std::make_shared<OperatorImpl>(*this);
     }
 
     /**
@@ -88,7 +92,9 @@ public:
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Producer_Op>::create(name)(*this);
+        if (Registrar<Producer_Op>::exists({name})) {
+            mImpl = Registrar<Producer_Op>::create({name})(*this);
+        }
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -101,10 +107,10 @@ public:
 
 public:
     void forward() override final {
-        printf("Basic Producer forward() function.\n");
+        fmt::print("Basic Producer forward() function.\n");
     }
     void backward() override final {
-        printf("Basic Producer backward() function.\n");
+        fmt::print("Basic Producer backward() function.\n");
     }
     void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
         if (getAttr<ProdAttr::Constant>()) {
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ab97bf3211edb53d65a90d16dba5d0c66dfa33da
--- /dev/null
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SIGMOID_H_
+#define AIDGE_CORE_OPERATOR_SIGMOID_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Sigmoid_Op : public OperatorTensor,
+    public Registrable<Sigmoid_Op, std::string, std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)> {
+public:
+    static const std::string Type;
+
+    Sigmoid_Op() : OperatorTensor(Type, 1, 0, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Sigmoid_Op(const Sigmoid_Op& op)
+        : OperatorTensor(op)
+    {
+        mImpl = op.mImpl ? Registrar<Sigmoid_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Sigmoid_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Sigmoid_Op>(*this);
+    }
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
+        mImpl = Registrar<Sigmoid_Op>::create(name)(*this);
+        mOutputs[0]->setBackend(name, device);
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Sigmoid(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
+}
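+
+// Usage sketch (illustrative, not from the original sources):
+//   auto sig = Sigmoid("sig1");  // Node owning a Sigmoid_Op
+// A backend implementation is attached later via setBackend(), once a
+// backend module has registered one for Sigmoid_Op.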
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ce0dc12a06d242d215c07dc6593bb7e2cb2c3c8a
--- /dev/null
+++ b/include/aidge/operator/Tanh.hpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_TANH_H_
+#define AIDGE_CORE_OPERATOR_TANH_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Tanh_Op : public OperatorTensor,
+    public Registrable<Tanh_Op, std::string, std::unique_ptr<OperatorImpl>(const Tanh_Op&)> {
+public:
+    static const std::string Type;
+
+    Tanh_Op() : OperatorTensor(Type, 1, 0, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Tanh_Op(const Tanh_Op& op)
+        : OperatorTensor(op)
+    {
+        mImpl = op.mImpl ? Registrar<Tanh_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Tanh_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Tanh_Op>(*this);
+    }
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
+        mImpl = Registrar<Tanh_Op>::create(name)(*this);
+        mOutputs[0]->setBackend(name, device);
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Tanh(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
+}
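+
+// Usage sketch (illustrative): `auto node = Tanh("tanh1");` creates a Node
+// owning a Tanh_Op; setBackend() later binds a registered implementation.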
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_TANH_H_ */
\ No newline at end of file
diff --git a/include/aidge/recipies/GraphViewHelper.hpp b/include/aidge/recipes/GraphViewHelper.hpp
similarity index 94%
rename from include/aidge/recipies/GraphViewHelper.hpp
rename to include/aidge/recipes/GraphViewHelper.hpp
index d7bcec713087054640c87c6fd229fee53d1ed4a6..c6204cdffa5e580190b8cd3f1817788a12e00bc3 100644
--- a/include/aidge/recipies/GraphViewHelper.hpp
+++ b/include/aidge/recipes/GraphViewHelper.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CORE_UTILS_RECIPIES_H_
-#define AIDGE_CORE_UTILS_RECIPIES_H_
+#ifndef AIDGE_CORE_UTILS_RECIPES_H_
+#define AIDGE_CORE_UTILS_RECIPES_H_
 
 #include <memory>
 #include <set>
diff --git a/include/aidge/recipies/LabelGraph.hpp b/include/aidge/recipes/LabelGraph.hpp
similarity index 90%
rename from include/aidge/recipies/LabelGraph.hpp
rename to include/aidge/recipes/LabelGraph.hpp
index 9dd77e5e9f397260cf936cf77b15616c17ea33b8..61f04c313bb4c12861b2a57299761208124e9cbf 100644
--- a/include/aidge/recipies/LabelGraph.hpp
+++ b/include/aidge/recipes/LabelGraph.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_RECIPIES_LABELGRAPH_H_
-#define AIDGE_RECIPIES_LABELGRAPH_H_
+#ifndef AIDGE_RECIPES_LABELGRAPH_H_
+#define AIDGE_RECIPES_LABELGRAPH_H_
 
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
@@ -32,4 +32,4 @@ NodePtr nodeLabel(NodePtr node);
 std::shared_ptr<GraphView> labelGraph(std::shared_ptr<GraphView> graph);
 } // namespace Aidge
 
-#endif /* AIDGE_RECIPIES_LABELGRAPH_H_ */
+#endif /* AIDGE_RECIPES_LABELGRAPH_H_ */
diff --git a/include/aidge/recipies/Recipies.hpp b/include/aidge/recipes/Recipes.hpp
similarity index 90%
rename from include/aidge/recipies/Recipies.hpp
rename to include/aidge/recipes/Recipes.hpp
index fb4bc22c69ec2b4e8dcc6178c9fcda0a85190f78..2f77ae707ff66a6d68f649796d1bf07cce1e4498 100644
--- a/include/aidge/recipies/Recipies.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CORE_UTILS_RECIPIES_H_
-#define AIDGE_CORE_UTILS_RECIPIES_H_
+#ifndef AIDGE_CORE_UTILS_RECIPES_H_
+#define AIDGE_CORE_UTILS_RECIPES_H_
 
 #include <memory>
 #include <set>
@@ -114,7 +114,13 @@ std::set<std::shared_ptr<Node>> getConvHorizontalTiling(const std::shared_ptr<No
 */
 void explicitCastMove(std::shared_ptr<GraphView> graphView);
 
+/**
+ * Flatten the graph by replacing the meta operators with their micro-graph.
+ * @param recursive If true, recursively replace meta operators until no
+ * meta operator remains in the graph.
+*/
+void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
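+// e.g. (illustrative call): expandMetaOps(graphView, /*recursive=*/true)
+// also flattens meta operators nested inside other meta operators.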
 
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
+#endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9f718e8df341b2303c876b903c4e0339461f88b2
--- /dev/null
+++ b/include/aidge/scheduler/MemoryManager.hpp
@@ -0,0 +1,324 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_MEMORY_MANAGER_H
+#define AIDGE_MEMORY_MANAGER_H
+
+#include <algorithm>  // std::max, std::min
+#include <cassert>
+#include <cmath>      // std::floor
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+
+namespace Aidge {
+class MemoryManager {
+public:
+    typedef int Clock_T;
+
+    enum OptimizeStrategy {
+        None,
+        OptimizeMaxLifetimeMinSizeFirst,
+        OptimizeMaxLifetimeMaxSizeFirst,
+        OptimizeMaxHoleMaxLifetimeFirst
+    };
+
+    // MemorySpaces are contiguous, non-overlapping memory blocks that can be
+    // re-arranged freely.
+    struct MemorySpace {
+        MemorySpace(Clock_T clock_,
+                    unsigned int offset_,
+                    unsigned int size_,
+                    std::set<std::shared_ptr<Node> > dependencies_
+                        = std::set<std::shared_ptr<Node> >()
+        ):
+            offset(offset_),
+            size(size_),
+            dependencies(dependencies_),
+            allocated(clock_),
+            released(-1) {}
+
+        unsigned int offset;
+        unsigned int size;
+        std::set<std::shared_ptr<Node> > dependencies;
+        Clock_T allocated;
+        Clock_T released;
+    };
+
+    // A MemoryPlane belongs to a MemorySpace. Any number of potentially
+    // overlapping planes can be associated with a MemorySpace.
+    // MemoryPlane can be non-contiguous (in case of stride, or wrapping, when
+    // offset + size > memSpace.size).
+    // MemoryPlane cannot be re-arranged inside a MemorySpace.
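+    // Illustrative example (assumed values): for a MemorySpace of size 100
+    // at offset 0, a MemoryPlane at offset 80 with size 30 keeps bytes
+    // [80, 100) as its contiguous part and wraps the remaining 10 bytes
+    // to [0, 10) of the MemorySpace.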
+    struct MemoryPlane {
+        MemoryPlane(std::shared_ptr<MemorySpace> memSpace_,
+                    Clock_T clock_,
+                    unsigned int offset_,
+                    unsigned int size_,
+                    unsigned int stride_ = 0,
+                    unsigned int length_ = 1,
+                    unsigned int count_ = 1
+        ):
+            memSpace(memSpace_),
+            allocated(clock_),
+            offset(offset_),
+            size(size_),
+            stride(std::max(size_, stride_)),
+            length(length_),
+            count(count_)
+        {
+            assert(offset <= memSpace->size);
+            // The preceding assert deliberately allows offset == memSpace->size
+            // (see issue #63), which means immediate wrapping.
+            // This happens when the final offset computed in reallocate() is
+            // at the end of the previous memPlane and also at the end of the
+            // memSpace (for example, with an in-place memory operation).
+            // Instead of bringing the offset back to the beginning of the
+            // memSpace, we stay attached to this offset in case the memSpace
+            // grows when a new memPlane is added.
+
+            assert(getContiguousOffset() >= memSpace->offset);
+            assert(getWrappedOffset() >= memSpace->offset);
+            assert(getContiguousOffset() + getContiguousSize()
+                <= memSpace->offset + memSpace->size);
+            assert(getWrappedOffset() + getWrappedSize()
+                <= memSpace->offset + memSpace->size);
+        }
+
+        inline unsigned int getSize() const {
+            return stride * length * count;
+        }
+
+        inline unsigned int getUsefulSize() const {
+            return size * length * count;
+        }
+
+        inline unsigned int getContiguousOffset() const {
+            return memSpace->offset + offset;
+        }
+
+        inline unsigned int getContiguousSize() const {
+            return std::min(getSize(), getLimit());
+        }
+
+        inline unsigned int getWrappedOffset() const {
+            return memSpace->offset;
+        }
+
+        inline unsigned int getWrappedSize() const {
+            return getSize() - getContiguousSize();
+        }
+
+        inline unsigned int getFinalOffset() const {
+            return (getWrappedSize() > 0)
+                ? getWrappedOffset() + getWrappedSize()
+                : getContiguousOffset() + getContiguousSize();
+        }
+
+        inline unsigned int getUpperOffset() const {
+            return (getContiguousOffset() + getContiguousSize());
+        }
+
+        // Limit is computed dynamically, as memSpace->size may increase after
+        // the creation of this memory space. This is actually necessary to
+        // ensure that the memory wrapping works correctly, because when 
+        // computing the margin required for the wrapping, it is assumed that
+        // the previous layer wrapping extends to the full memory space size.
+        inline unsigned int getLimit() const {
+            // limit must be a multiple of (stride * length) if count > 1
+            // or stride if length > 1
+            // uses floor() to stay below memSpace->size
+            return (count > 1)
+                ? std::floor((memSpace->size - offset)
+                        / static_cast<double>(stride * length)) * (stride * length)
+                : (length > 1)
+                    ? std::floor((memSpace->size - offset)
+                            / static_cast<double>(stride)) * stride
+                    : memSpace->size - offset;
+        }
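+        // Illustrative values (assumption): memSpace->size = 100, offset = 10,
+        // stride = 16, length = 5, count = 1 gives
+        // floor(90 / 16) * 16 = 80, the largest stride multiple that fits.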
+
+        std::shared_ptr<MemorySpace> memSpace;
+        Clock_T allocated;
+        unsigned int offset;
+        unsigned int size;
+        unsigned int stride;
+        unsigned int length;
+        unsigned int count;
+    };
+
+    struct MaxLifetimeMinSizeFirst {
+        MaxLifetimeMinSizeFirst(unsigned int maxLifetime_)
+            : maxLifetime(maxLifetime_) {}
+        const unsigned int maxLifetime;
+
+        bool operator()(const std::shared_ptr<MemorySpace>& p0,
+                        const std::shared_ptr<MemorySpace>& p1);
+    };
+
+    struct MaxLifetimeMaxSizeFirst {
+        MaxLifetimeMaxSizeFirst(unsigned int maxLifetime_)
+            : maxLifetime(maxLifetime_) {}
+        const unsigned int maxLifetime;
+
+        bool operator()(const std::shared_ptr<MemorySpace>& p0,
+                        const std::shared_ptr<MemorySpace>& p1);
+    };
+
+    struct MaxHoleMaxLifetimeFirst {
+        MaxHoleMaxLifetimeFirst(unsigned int maxLifetime_, MemoryManager* inst_)
+            : maxLifetime(maxLifetime_),
+              inst(inst_) {}
+        const unsigned int maxLifetime;
+        MemoryManager* inst;
+
+        bool operator()(const std::shared_ptr<MemorySpace>& p0,
+                        const std::shared_ptr<MemorySpace>& p1);
+    };
+
+    struct CompByNodeName {
+        bool operator()(const std::shared_ptr<Node>& lhs,
+                        const std::shared_ptr<Node>& rhs) const
+        {
+            return lhs->name() < rhs->name();
+        }
+    };
+
+    typedef std::map<std::shared_ptr<Node>, std::vector<MemoryPlane>,
+        CompByNodeName> MemMap_T;
+
+    MemoryManager(): mClock(0) {}
+    /// Generates a new MemorySpace
+    std::shared_ptr<MemorySpace> reserve(unsigned int size,
+                                    const std::set<std::shared_ptr<Node> >&
+                          dependencies = std::set<std::shared_ptr<Node> >());
+    /// Expand an existing MemorySpace without affecting its MemoryPlanes.
+    /// This function rebuilds the memory stack mMemStack.
+    void expand(std::shared_ptr<MemorySpace> memSpace,
+                unsigned int requiredSize);
+    /// Generates a MemoryPlane in a new MemorySpace
+    MemoryPlane allocate(unsigned int size,
+                         const std::set<std::shared_ptr<Node> >&
+                          dependencies = std::set<std::shared_ptr<Node> >(),
+                         unsigned int stride = 0,
+                         unsigned int length = 1,
+                         unsigned int count = 1);
+    /// Generates a MemoryPlane in a new MemorySpace, associated with a Node
+    unsigned int allocate(const std::shared_ptr<Node>& node,
+                          unsigned int size,
+                          const std::set<std::shared_ptr<Node> >&
+                          dependencies = std::set<std::shared_ptr<Node> >(),
+                          unsigned int stride = 0,
+                          unsigned int length = 1,
+                          unsigned int count = 1);
+    bool isWrapAround(std::shared_ptr<MemorySpace> memSpace,
+                      unsigned int offset,
+                      unsigned int size,
+                      unsigned int stride = 0,
+                      unsigned int length = 1,
+                      unsigned int count = 1) const;
+    /// Generate a new MemoryPlane in an existing MemorySpace
+    MemoryPlane reallocate(std::shared_ptr<MemorySpace> memSpace,
+                           unsigned int offset,
+                           unsigned int size,
+                           bool wrapAround,
+                           unsigned int extraSize = 0,
+                           const std::set<std::shared_ptr<Node> >&
+                additionalDependencies = std::set<std::shared_ptr<Node> >(),
+                           unsigned int stride = 0,
+                           unsigned int length = 1,
+                           unsigned int count = 1);
+    /// Generate a new MemoryPlane directly following an existing MemoryPlane
+    /// memPlane with an additional offset extraOffset
+    MemoryPlane reallocate(const MemoryPlane& memPlane,
+                           unsigned int extraOffset,
+                           unsigned int size,
+                           bool wrapAround,
+                           unsigned int extraSize = 0,
+                           const std::set<std::shared_ptr<Node> >&
+                additionalDependencies = std::set<std::shared_ptr<Node> >(),
+                           unsigned int stride = 0,
+                           unsigned int length = 1,
+                           unsigned int count = 1);
+    /// Generate a new MemoryPlane in an existing MemorySpace, associated
+    /// with a Node
+    unsigned int reallocate(std::shared_ptr<MemorySpace> memSpace,
+                            const std::shared_ptr<Node>& node,
+                            unsigned int offset,
+                            unsigned int size,
+                            bool wrapAround,
+                            unsigned int extraSize = 0,
+                            const std::set<std::shared_ptr<Node> >&
+                additionalDependencies = std::set<std::shared_ptr<Node> >(),
+                            unsigned int stride = 0,
+                            unsigned int length = 1,
+                            unsigned int count = 1);
+    /// Generate a new MemoryPlane directly following an existing MemoryPlane
+    /// memPlane with an additional offset extraOffset
+    unsigned int reallocate(const MemoryPlane& memPlane,
+                            const std::shared_ptr<Node>& node,
+                            unsigned int extraOffset,
+                            unsigned int size,
+                            bool wrapAround,
+                            unsigned int extraSize = 0,
+                            const std::set<std::shared_ptr<Node> >&
+                additionalDependencies = std::set<std::shared_ptr<Node> >(),
+                            unsigned int stride = 0,
+                            unsigned int length = 1,
+                            unsigned int count = 1);
+
+    unsigned int release(std::shared_ptr<MemorySpace> memSpace);
+    unsigned int release(const std::shared_ptr<Node>& node);
+    unsigned int releaseDependencies(const std::shared_ptr<Node>& node);
+    void optimize(OptimizeStrategy strategy);
+    unsigned int getOffset(const std::shared_ptr<Node>& node,
+                           unsigned int plane = 0) const;
+    unsigned int getSize(const std::shared_ptr<Node>& node,
+                         unsigned int plane) const;
+    unsigned int getSize(const std::shared_ptr<Node>& node) const;
+    unsigned int getNbPlanes(const std::shared_ptr<Node>& node) const;
+    unsigned int getPeakUsage() const;
+    Clock_T getMaxLifetime() const;
+    const std::vector<MemoryPlane>& getPlanes(const std::shared_ptr<Node>& node)
+        const;
+    const MemMap_T& getPlanes() const { return mMemPlanes; }
+    MemMap_T getPlanes(std::shared_ptr<MemorySpace> memSpace) const;
+    unsigned int getNbPlanes(std::shared_ptr<MemorySpace> memSpace) const;
+    Clock_T getCurrentTick() const { return mClock; };
+    void tick();
+    void log(const std::string& fileName) const;
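+
+    // Typical call sequence (illustrative sketch, not normative):
+    //   MemoryManager mm;
+    //   MemoryManager::MemoryPlane p = mm.allocate(1024); // new space + plane
+    //   mm.tick();                                        // advance the clock
+    //   mm.release(p.memSpace);                           // release the space
+    //   mm.optimize(MemoryManager::OptimizeMaxLifetimeMinSizeFirst);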
+
+private:
+    /// Find a valid offset in the memory stack that can fit a contiguous
+    /// chunk of memory of the requested size
+    unsigned int onStack(unsigned int size);
+    unsigned int offStack(unsigned int offset);
+    std::map<unsigned int, unsigned int> getStack(
+        std::shared_ptr<MemorySpace> memSpace,
+        Clock_T clock) const;
+    std::pair<Clock_T, unsigned int> getMaxHole(
+        std::shared_ptr<MemorySpace> memSpace) const;
+
+    std::map<unsigned int, unsigned int> mMemStack;
+    std::vector<std::shared_ptr<MemorySpace> > mMemSpaces;
+    MemMap_T mMemPlanes;
+    Clock_T mClock;
+};
+} // namespace Aidge
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::MemoryManager::OptimizeStrategy>::data[]
+    = {"None",
+       "OptimizeMaxLifetimeMinSizeFirst",
+       "OptimizeMaxLifetimeMaxSizeFirst",
+       "OptimizeMaxHoleMaxLifetimeFirst"};
+}
+
+#endif // AIDGE_MEMORY_MANAGER_H
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 7a81503c967adce3ee000c36ee2f509901cda9ec..747785bf886889aed273c944904ddbb6198c4968 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -17,8 +17,12 @@
 #include <set>
 #include <string>
 #include <vector>
+#include <map>
+
+#include "aidge/utils/Types.h"
 
 #include "aidge/data/Tensor.hpp"
+#include "aidge/scheduler/MemoryManager.hpp"
 
 namespace Aidge {
 class Node;
@@ -38,19 +42,31 @@ private:
         std::chrono::time_point<std::chrono::high_resolution_clock> end;
     };
 
+    struct PriorProducersConsumers {
+        bool isPrior = false;
+        std::set<std::shared_ptr<Aidge::Node>> requiredProducers;
+        std::set<std::shared_ptr<Aidge::Node>> priorConsumers;
+    };
+
 public:
-    SequentialScheduler(std::shared_ptr<GraphView> graphView)
-        : mGraphView(graphView)
+    SequentialScheduler(std::shared_ptr<GraphView> graphView, std::shared_ptr<Node> upperNode = nullptr)
+        : mGraphView(graphView),
+          mUpperNode(upperNode)
     {
         // ctor
     };
     ~SequentialScheduler() = default;
 
     void generateScheduling(bool verbose = false);
-    inline void resetScheduling() {
-        mScheduling.clear();
-        mStaticSchedule.clear();
-    }
+    void resetScheduling();
+
+    /**
+     * Generate the memory layout for the current static scheduling.
+     * @param incProducers If true, include the producers in the memory layout.
+     * @param wrapAroundBuffer If true, allow wrapping in memory planes.
+    */
+    MemoryManager generateMemory(bool incProducers = false, bool wrapAroundBuffer = false) const;
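+    // Illustrative usage (assumption): after generateScheduling(), call
+    //   MemoryManager mm = scheduler.generateMemory(false, true);
+    //   mm.log("memory_layout.log");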
+
     /**
      * @brief Place the data tensors in the data input tensors of the graphView. In case of multiple data input tensors, they are mapped to producers in the order given by the graph.
      * 
@@ -73,8 +89,8 @@ public:
      * @brief Return a vector of Node ordered by the order they are called by the scheduler
      * @return std::vector<std::shared_ptr<Node>>
      */
-    inline std::vector<std::shared_ptr<Node>> getStaticScheduling() const noexcept {
-        return mStaticSchedule;
+    inline std::vector<std::shared_ptr<Node>> getStaticScheduling(size_t step = 0) const {
+        return mStaticSchedule.at(step);
     }
     inline std::shared_ptr<GraphView> getGraphView() const noexcept {
         return mGraphView;
@@ -88,13 +104,18 @@ private:
      * @return std::set<std::shared_ptr<Node>>
      */
     std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
+    NbElts_t getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx) const;
+    PriorProducersConsumers getPriorProducersConsumers(const std::shared_ptr<Node>& node) const;
 
     /** @brief Shared ptr to the scheduled graph view */
     std::shared_ptr<GraphView> mGraphView;
+    /** @brief Shared ptr to the upper node containing the graph view */
+    std::weak_ptr<Node> mUpperNode;
     /** @brief List of SchedulingElement (i.e: Nodes with their computation time) */
     std::vector<SchedulingElement> mScheduling;
     /** @brief List of nodes ordered by their scheduling order, for each step */
-    std::vector<std::shared_ptr<Node>> mStaticSchedule;
+    std::vector<std::vector<std::shared_ptr<Node>>> mStaticSchedule;
+    size_t mStaticScheduleStep = 0;
 };
 } // namespace Aidge
 
diff --git a/include/aidge/utils/ErrorHandling.hpp b/include/aidge/utils/ErrorHandling.hpp
index 8fbeff30abecfec0077786b21825b6a6f36677c6..653a774b92e26513c9ac555e0aec1daed793e208 100644
--- a/include/aidge/utils/ErrorHandling.hpp
+++ b/include/aidge/utils/ErrorHandling.hpp
@@ -13,30 +13,18 @@
 #ifndef AIDGE_ERRORHANDLING_H_
 #define AIDGE_ERRORHANDLING_H_
 
-#include <cstdio>
 #include <memory>
 
-#define AIDGE_STRINGIZE_DETAIL(x) #x
-#define AIDGE_STRINGIZE(x) AIDGE_STRINGIZE_DETAIL(x)
+#include <fmt/format.h>
+#include <fmt/ranges.h>
 
 #ifdef NO_EXCEPTION
 #define AIDGE_THROW_OR_ABORT(ex, ...) \
-do { std::printf(__VA_ARGS__); std::abort(); } while (false)
+do { fmt::print(__VA_ARGS__); std::abort(); } while (false)
 #else
 #include <stdexcept>
-#include <memory>
 #define AIDGE_THROW_OR_ABORT(ex, ...) \
-do { \
-    int n = 128; \
-    std::unique_ptr<char[]> formatted; \
-    formatted.reset(new char[n]); \
-    const int len = std::snprintf(formatted.get(), n, __VA_ARGS__); \
-    if (len >= n) { \
-        formatted.reset(new char[len + 1]); \
-        std::snprintf(formatted.get(), len + 1, __VA_ARGS__); \
-    }; \
-    throw ex(formatted.get()); \
-} while (false)
+throw ex(fmt::format(__VA_ARGS__))
 #endif
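+
+// The variadic arguments are now an fmt-style format string plus arguments,
+// e.g. AIDGE_THROW_OR_ABORT(std::runtime_error, "bad size {}", size);
+// (illustrative call; `size` is a placeholder variable)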
 
 /**
@@ -45,7 +33,7 @@ do { \
  * If it asserts, it means a user error.
 */
 #define AIDGE_ASSERT(stm, ...) \
-if (!(stm)) { printf("Assertion failed: " AIDGE_STRINGIZE(stm) " in " __FILE__ ":%d", __LINE__); \
+if (!(stm)) { fmt::print("Assertion failed: " #stm " in {}:{}", __FILE__, __LINE__); \
     AIDGE_THROW_OR_ABORT(std::runtime_error, __VA_ARGS__); }
 
 /**
@@ -54,6 +42,6 @@ if (!(stm)) { printf("Assertion failed: " AIDGE_STRINGIZE(stm) " in " __FILE__ "
  * If it asserts, it means a bug.
 */
 #define AIDGE_INTERNAL_ASSERT(stm) \
-assert((stm) && "Internal assertion failed: " #stm " in " __FILE__ ":" AIDGE_STRINGIZE(__LINE__))
+assert((stm) && "Internal assertion failed")
 
 #endif //AIDGE_ERRORHANDLING_H_
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 66a07eb0ce21354b20f1ca416cc68d26d9bd6280..4d604d520d3d8af532e196c7785896ddc1c242d0 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -16,6 +16,8 @@
 #include <pybind11/pybind11.h>
 #endif
 
+#include "aidge/utils/ErrorHandling.hpp"
+
 #include <functional>
 #include <map>
 #include <cassert>
@@ -55,7 +57,7 @@ struct Registrar {
     typedef typename C::registrar_type registrar_type;
 
     Registrar(const registrar_key& key, registrar_type func) {
-        //printf("REGISTRAR: %s\n", key.c_str());
+        //fmt::print("REGISTRAR: {}\n", key);
         bool newInsert;
         std::tie(std::ignore, newInsert) = C::registry().insert(std::make_pair(key, func));
         //assert(newInsert && "registrar already exists");
@@ -68,7 +70,7 @@ struct Registrar {
 
     static auto create(const registrar_key& key){
         const auto it = C::registry().find(key);
-        assert(it != C::registry().end() && "invalid registrar key");
+        AIDGE_ASSERT(it != C::registry().end(), "missing or invalid registrar key: {}\nDid you include/import the corresponding module?", key);
 
         return (*it).second;
     }
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index be00932e47a93cc4349d39f6cad542cec506c38a..6bf59155373cf73d158fce4eb5bda58f7d279e69 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -88,25 +88,25 @@ public:
 
     // Runtime access with name
     template <typename R>
-    R& getAttr(const char* name) {
+    R& getAttr(const std::string& name) {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 return getAttr<R>(i);
             }
         }
 
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name);
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"{}\" not found", name);
     }
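+    // Illustrative lookup (assumption that an "Epsilon" attribute exists):
+    //   float eps = attrs.getAttr<float>("Epsilon");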
 
     template <typename R>
-    const R& getAttr(const char* name) const {
+    const R& getAttr(const std::string& name) const {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 return getAttr<R>(i);
             }
         }
 
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name);
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"{}\" not found", name);
     }
 
     template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
@@ -116,7 +116,7 @@ public:
                 return reinterpret_cast<R&>(std::get<SIZE-1>(mAttrs));
             }
             else {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "wrong type for attribute with index %lu", i);
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "wrong type for attribute with index {}", i);
             }
         }
         else {
@@ -136,7 +136,7 @@ public:
                 return reinterpret_cast<const R&>(std::get<SIZE-1>(mAttrs));
             }
             else {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "wrong type for attribute with index %lu", i);
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "wrong type for attribute with index {}", i);
             }
         }
         else {
@@ -190,7 +190,7 @@ public:
             }
         }
 
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name.c_str());
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"{}\" not found", name);
     }
 
     std::set<std::string> getAttrsName() const override final {
@@ -227,7 +227,7 @@ public:
             }
         }
 
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
+        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
     }
 
 
@@ -242,7 +242,7 @@ public:
                 return;
             }
         }
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
+        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
     }
     #endif
 
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 34610069079ee792ebbe4b261b57177b3bbe2997..a2a5e6b8bb2d0f2413ef94c360b383608c5b41b5 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -102,6 +102,15 @@ public:
 
         );
     }
+    void resetConsummerProducer() override {
+        PYBIND11_OVERRIDE_NAME(
+            void,
+            OperatorImpl,
+            "reset_consummer_producer",
+            resetConsummerProducer,
+
+        );
+    }
 };
 
 void init_OperatorImpl(py::module& m){
@@ -116,6 +125,7 @@ void init_OperatorImpl(py::module& m){
     .def("get_nb_consumed_data", &OperatorImpl::getNbConsumedData)
     .def("get_nb_produced_data", &OperatorImpl::getNbProducedData)
     .def("update_consummer_producer", &OperatorImpl::updateConsummerProducer)
+    .def("reset_consummer_producer", &OperatorImpl::resetConsummerProducer)
     ;
 }
 }
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index 3e9f015250acda34f0ae55af38f67df3ca4ad180..df3792fd784a2ef2b9418628959629ac59c04094 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -30,7 +30,7 @@ void init_Data(py::module& m){
     ;
 
     py::class_<Data, std::shared_ptr<Data>>(m,"Data")
-    .def(py::init<const char*>());
+    .def(py::init<const std::string&>());
 
     
 }
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index f8a0567bdc7bb27bdff1137a020857cac5a45604..93389edf663a6154daf0b9ef2a7cc4095abc4d0f 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -46,7 +46,7 @@ void addCtor(py::class_<Tensor,
             newTensor->setBackend(backend);
             newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size());
         }else{
-            AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend %s, verify you have `import aidge_backend_%s`.\n", backend.c_str(), backend.c_str());
+            AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend {}, verify you have `import aidge_backend_{}`.\n", backend, backend);
         }
 
         return newTensor;
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index c41d99c1a5b034424da06aa9a6c5ba5c6aabbca3..a41d0d92835be2b5ef07d30c4a5233da1e3906b7 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -45,6 +45,9 @@ void init_GraphView(py::module& m) {
           :rtype: list[Node]
           )mydelimiter")
 
+          .def("set_ordered_inputs", &GraphView::setOrderedInputs, py::arg("inputs"))
+          .def("set_ordered_outputs", &GraphView::setOrderedOutputs, py::arg("outputs"))
+
           .def("add", (void (GraphView::*)(std::shared_ptr<Node>, bool)) & GraphView::add,
                py::arg("other_node"), py::arg("include_learnable_parameters") = true,
           R"mydelimiter(
@@ -86,7 +89,19 @@ void init_GraphView(py::module& m) {
           :type to_tensor: int
           )mydelimiter")
 
-          .def_static("replace", &GraphView::replace, py::arg("old_nodes"), py::arg("new_nodes"),
+          .def_static("replace", py::overload_cast<const std::shared_ptr<GraphView>&, const std::shared_ptr<GraphView>&>(&GraphView::replace), py::arg("old_graph"), py::arg("new_graph"),
+          R"mydelimiter(
+          Replace the old set of Nodes, given as a GraphView, with the new set of Nodes, given as a GraphView, if possible in every GraphView.
+
+          :param old_graph: GraphView of Nodes actually connected in GraphViews.
+          :type old_graph: GraphView
+          :param new_graph: GraphView of Nodes with inner connections already taken care of.
+          :type new_graph: GraphView
+          :return: Whether any replacement has been made.
+          :rtype: bool
+          )mydelimiter")
+
+          .def_static("replace", py::overload_cast<const std::set<NodePtr>&, const std::set<NodePtr>&>(&GraphView::replace), py::arg("old_nodes"), py::arg("new_nodes"),
           R"mydelimiter(
           Replace the old set of Nodes with the new set of given Nodes if possible in every GraphView.
 
@@ -118,5 +133,7 @@ void init_GraphView(py::module& m) {
           //           }
           //      })
             ;
+
+     m.def("get_connected_graph_view", &getConnectedGraphView);
 }
 }  // namespace Aidge
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 83f5688fa3d9e459a364ee3e74975a23d09c236c..71f5b368bcd358231439dead96ade266b313cf6c 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -23,6 +23,7 @@ namespace py = pybind11;
 namespace Aidge {
 void init_Node(py::module& m) {
     py::class_<Node, std::shared_ptr<Node>>(m, "Node")
+    .def(py::init<std::shared_ptr<Operator>, const std::string&>(), py::arg("op"), py::arg("name") = "")
     .def("name", &Node::name,
     R"mydelimiter(
     Name of the Node.
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index dc586b7d947c6d8433fabe2fbfaa0990de5c132a..0ca01c07535f65ac1161603d32d191881eb28746 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -40,8 +40,8 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
 
         return AvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()));
     }, py::arg("kernel_dims"),
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index c81c7ade4de50e6879fd32c59f6574b14c473398..e11fc288fb9eb837c0a7b36c0a1c4024ab6c8633 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -22,6 +22,9 @@ namespace Aidge {
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    .def(py::init<float, float>(),
+            py::arg("epsilon"),
+            py::arg("momentum"))
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
     .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
     .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 455ea4024438b97b7ac6f07e5fc6722658b42ea4..346acc5d9d05c24e9538c3b8c5edf1f7e37d6ba8 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -48,9 +48,9 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &dilation_dims) {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("in_channels"),
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index d858336b6578b580378778f64984ba565e28f941..e25024e09cdd4fe234416a9aa8f0fef91a3c27fe 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -46,9 +46,9 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
                                                                   const std::vector<DimSize_t> &dilation_dims) {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("nb_channels"),
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 485e0eaf6e6e68367ae9037fd922da07433a76e3..9c83a67e81120e2cc2674e3ceb4c8871dd6fd393 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -43,8 +43,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
                                                                   bool ceil_mode) {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
 
         return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index b043ac23c378b9d591b7d1273ebcb5d48a37394a..20a620cee737db5380ee7641b161cf6296ef7e5b 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -32,10 +32,10 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          const std::vector<DimSize_t> &dilation_dims)
     {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
-        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("in_channels"),
@@ -55,10 +55,10 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          const std::vector<DimSize_t> &dilation_dims)
     {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
-        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("nb_channels"),
@@ -76,9 +76,9 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims)
     {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
         return PaddedAvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()));
     }, py::arg("kernel_dims"),
@@ -95,9 +95,9 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          bool ceil_mode)
     {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
         return PaddedMaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
@@ -108,6 +108,14 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
 
 }
 
+void declare_LSTMOp(py::module &m) {
+  m.def("LSTM", &LSTM, py::arg("in_channels"),
+       py::arg("hidden_channels"),
+       py::arg("seq_length"),
+       py::arg("nobias") = false,
+       py::arg("name") = "");
+}
+
 void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedConvOp<1>(m);
   declare_PaddedConvOp<2>(m);
@@ -121,8 +129,12 @@ void init_MetaOperatorDefs(py::module &m) {
   declare_PaddedMaxPoolingOp<1>(m);
   declare_PaddedMaxPoolingOp<2>(m);
   declare_PaddedMaxPoolingOp<3>(m);
+  declare_LSTMOp(m);
 
   py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance())
+  .def(py::init<const char *, const std::shared_ptr<GraphView>&>(),
+          py::arg("type"),
+          py::arg("graph"))
   .def("get_micro_graph", &MetaOperator_Op::getMicroGraph);
 
   m.def("meta_operator", &MetaOperator,
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index df3fdc297ce44cf96ff26bffb4cd96fa1fe8fe22..69d63fe7b8d31a6fa9747df2ce4a93ec4a0f4cac 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -43,7 +43,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
                                                         const std::string& name,
                                                         const PadBorderType &borderType = PadBorderType::Constant,
                                                         double borderValue = 0.0) {
-        AIDGE_ASSERT(beginEndTuples.size() == 2*DIM, "begin_end_tuples size [%ld] does not match DIM [%d]", beginEndTuples.size(), 2*DIM);
+        AIDGE_ASSERT(beginEndTuples.size() == 2*DIM, "begin_end_tuples size [{}] does not match DIM [{}]", beginEndTuples.size(), 2*DIM);
         return Pad<DIM>(to_array<2*DIM>(beginEndTuples.begin()), name, borderType, borderValue);
     },
        py::arg("begin_end_tuples"),
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..91726fc1d4721df1be712a26721d09b1a98fd9a2
--- /dev/null
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Pop.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Pop(py::module& m) {
+    py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor, Attributes>(m, "PopOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Pop_Op::getInputsName)
+    .def("get_outputs_name", &Pop_Op::getOutputsName);
+
+    m.def("Pop", &Pop, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 1a50edba03f62e6c43ff60320fe4c3d5caa65f41..11e979736dcab211aa11758cb3138f9d6827cc4e 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -34,7 +34,7 @@ template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
   m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
                                                                 DimSize_t keepDims,
                                                                 const std::string& name) {
-        AIDGE_ASSERT(axes.size() == DIM, "axes size [%ld] does not match DIM [%d]", axes.size(), DIM);
+        AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM);
 
         return ReduceMean<DIM>(to_array<DIM>(axes.begin()), keepDims, name);
     }, py::arg("axes"),
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2393e56c10ef37e4eee078fe6f8bee4abd77ac39
--- /dev/null
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Sigmoid.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Sigmoid(py::module& m) {
+    py::class_<Sigmoid_Op, std::shared_ptr<Sigmoid_Op>, OperatorTensor>(m, "SigmoidOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Sigmoid_Op::getInputsName)
+    .def("get_outputs_name", &Sigmoid_Op::getOutputsName);
+
+    m.def("Sigmoid", &Sigmoid, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2f3140039b030505af860352372c865c1aab05e3
--- /dev/null
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Tanh.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Tanh(py::module& m) {
+    py::class_<Tanh_Op, std::shared_ptr<Tanh_Op>, OperatorTensor>(m, "TanhOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Tanh_Op::getInputsName)
+    .def("get_outputs_name", &Tanh_Op::getOutputsName);
+
+    m.def("Tanh", &Tanh, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index d535a2c932c8d61c0395f03ffc0978caf7ad692f..f5fbaf0e75ddd81265fd17e0aeb18b54f3908627 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -35,7 +35,7 @@ void declare_Transpose(py::module &m) {
 
   m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
                                                                   const std::string& name) {
-        AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [%ld] does not match DIM [%d]", output_dims_order.size(), DIM);
+        AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM);
         return Transpose<DIM>(to_array<DIM>(output_dims_order.begin()), name);
     }, py::arg("output_dims_order"),
        py::arg("name") = "");
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 40ebc92aa872baacb2e7a87060f560d05d615cff..e80dd96f3758e9bd67484ed24714b633df619b0f 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -45,14 +45,17 @@ void init_MetaOperatorDefs(py::module&);
 void init_Mul(py::module&);
 void init_Producer(py::module&);
 void init_Pad(py::module&);
+void init_Pop(py::module&);
 void init_Pow(py::module&);
 void init_ReduceMean(py::module&);
 void init_ReLU(py::module&);
 void init_Reshape(py::module&);
+void init_Sigmoid(py::module&);
 void init_Slice(py::module&);
 void init_Softmax(py::module&);
 void init_Sqrt(py::module&);
 void init_Sub(py::module&);
+void init_Tanh(py::module&);
 void init_Transpose(py::module&);
 void init_Identity(py::module&);
 
@@ -64,7 +67,7 @@ void init_Connector(py::module&);
 void init_GraphRegex(py::module&);
 void init_MatchSolution(py::module&);
 
-void init_Recipies(py::module&);
+void init_Recipes(py::module&);
 
 void init_Scheduler(py::module&);
 void init_TensorUtils(py::module&);
@@ -104,14 +107,17 @@ void init_Aidge(py::module& m){
     init_Mul(m);
     init_Pad(m);
 
+    init_Pop(m);
     init_Pow(m);
     init_ReduceMean(m);
     init_ReLU(m);
     init_Reshape(m);
+    init_Sigmoid(m);
     init_Slice(m);
     init_Softmax(m);
     init_Sqrt(m);
     init_Sub(m);
+    init_Tanh(m);
     init_Transpose(m);
     init_Identity(m);
 
@@ -120,7 +126,7 @@ void init_Aidge(py::module& m){
     init_GraphRegex(m);
     init_MatchSolution(m);
 
-    init_Recipies(m);
+    init_Recipes(m);
     init_Scheduler(m);
     init_TensorUtils(m);
 }
diff --git a/python_binding/recipies/pybind_Recipies.cpp b/python_binding/recipes/pybind_Recipes.cpp
similarity index 97%
rename from python_binding/recipies/pybind_Recipies.cpp
rename to python_binding/recipes/pybind_Recipes.cpp
index bd058defb21c13cea1323e4748129c92519de039..f122c411618ce28a641fd46ee568f99cc48e9f58 100644
--- a/python_binding/recipies/pybind_Recipies.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -15,13 +15,13 @@
 #include <cstddef>
 #include <string>
 
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 
 namespace Aidge {
-void init_Recipies(py::module &m) {
+void init_Recipes(py::module &m) {
 
 
   m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index 4eb715e799158a1ead143430f574f98059662666..170aa6c271a4f08ff5ad2801b754b647fee56df6 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -24,7 +24,7 @@ void init_Scheduler(py::module& m){
     .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &SequentialScheduler::resetScheduling)
     .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false)
-    .def("get_static_scheduling", &SequentialScheduler::getStaticScheduling)
+    .def("get_static_scheduling", &SequentialScheduler::getStaticScheduling, py::arg("step") = 0)
     ;
 }
 }
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index b76bf33367221add6273e02590d6ec315cfa4544..1911da228c83d66117a2591adf47dc07cd8dc674 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -68,6 +68,11 @@ void Aidge::OperatorImpl::updateConsummerProducer(){
     }
 }
 
+void Aidge::OperatorImpl::resetConsummerProducer(){
+    std::fill(mNbConsumedData.begin(), mNbConsumedData.end(), 0);
+    std::fill(mNbProducedData.begin(), mNbProducedData.end(), 0);
+}
+
 void Aidge::OperatorImpl::forward() {
     AIDGE_THROW_OR_ABORT(std::runtime_error, "forward() not implemented");
 }
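The new `resetConsummerProducer()` zeroes the per-input consumed and per-output produced element counters, letting a scheduler restart from a clean state. A hedged usage sketch, assuming `node` is a `NodePtr` whose operator has an implementation attached:

```cpp
// Sketch: after a full pass, the counters are non-zero; resetting them lets
// updateConsummerProducer()/generateScheduling() start over from scratch.
node->getOperator()->resetConsummerProducer();
```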
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index a93d9af8a972605b1519e9974971ff9e7ad3ef2f..3681ac533cab36d68e5243fe0486b7d0febca694 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -15,10 +15,16 @@
 #include <utility>
 #include <numeric>
 
+#include <fmt/format.h>
+#include <fmt/ranges.h>
+
 #include "aidge/utils/Types.h"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
 ///////////////////////////////////////////////////////
@@ -54,39 +60,54 @@ std::string Aidge::GraphView::name() const { return mName; }
 
 void Aidge::GraphView::setName(const std::string &name) { mName = name; }
 
+void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProducers) const {
+    auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((path + ".mmd").c_str(), "w"), &std::fclose);
 
-void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers) const {
-    FILE *fp = std::fopen((path + ".mmd").c_str(), "w");
-    std::fprintf(fp,
-                "%%%%{init: {'flowchart': { 'curve': 'monotoneY'}, "
-                "'fontFamily': 'Verdana' } }%%%%\nflowchart TB\n\n");
+    if (!fp) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Could not create graph view log file: {}", path + ".mmd");
+    }
 
-    std::map<const std::string, std::size_t> typeCounter;
-    std::map<std::shared_ptr<Node>, std::string> namePtrTable;
+    fmt::print(fp.get(),
+                "%%{{init: {{'flowchart': {{ 'curve': 'monotoneY'}}, "
+                "'fontFamily': 'Verdana' }} }}%%\nflowchart TB\n\n");
 
     // Start by creating every node
-    for (const std::shared_ptr<Node> &node_ptr : mNodes) {
-        const std::string currentType = node_ptr->type();
-        if (typeCounter.find(currentType) == typeCounter.end())
-            typeCounter[currentType] = 0;
-        ++typeCounter[currentType];
+    const auto namePtrTable = getRankedNodesName("{3}");
 
+    for (const std::shared_ptr<Node> &node_ptr : mNodes) {
         std::string givenName =
             (node_ptr->name().empty())
-                ? "<em>" + currentType + "#" + std::to_string(typeCounter[currentType]) + "</em>"
-                : "\"" + node_ptr->name() + "\\n<sub><em>( " + currentType + "#" + std::to_string(typeCounter[currentType]) + " )</em></sub>\"";
-        namePtrTable[node_ptr] =
-            (currentType + "_" + std::to_string(typeCounter[currentType]));
+                ? "<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>"
+                : "\"" + node_ptr->name() + "\\n<sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
+
+        std::string nodeCls = "";
+        if (node_ptr->type() == "Producer") {
+          nodeCls = ":::producerCls";
+        }
+        else if (std::dynamic_pointer_cast<GenericOperator_Op>(node_ptr->getOperator())) {
+          nodeCls = ":::genericCls";
+        }
+        else if (const auto metaOp = std::dynamic_pointer_cast<MetaOperator_Op>(node_ptr->getOperator())) {
+          nodeCls = ":::metaCls";
+
+          if (verbose) {
+            metaOp->getMicroGraph()->save(path + "_" + node_ptr->type() + "#" + namePtrTable.at(node_ptr), verbose, showProducers);
+          }
+        }
 
         if (node_ptr == mRootNode) {
-          std::fprintf(fp, "%s(%s):::rootCls\n", namePtrTable[node_ptr].c_str(),
-                      givenName.c_str());
+          if (nodeCls.empty()) {
+            nodeCls = ":::rootCls";
+          }
+          else {
+            nodeCls += "_rootCls";
+          }
         }
-        else {
-            if ((currentType != "Producer") || showProducers) {
-                std::fprintf(fp, "%s(%s)\n", namePtrTable[node_ptr].c_str(),
-                            givenName.c_str());
-            }
+
+        if (node_ptr == mRootNode || node_ptr->type() != "Producer" || showProducers) {
+          fmt::print(fp.get(), "{}_{}({}){}\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                      givenName, nodeCls);
         }
     }
 
@@ -102,13 +123,20 @@ void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers)
             IOIndex_t inputIdx = 0;
             for (auto parent : child->inputs()) {
               if (parent.first == node_ptr && parent.second == outputIdx) {
+                // Add-on to display the operator's output dimensions
+                std::string dims = "";
+                const auto op = std::dynamic_pointer_cast<OperatorTensor>(node_ptr->getOperator());
+                if (op && !op->getOutput(outputIdx)->dims().empty()) {
+                  dims += " " + fmt::format("{}", op->getOutput(outputIdx)->dims());
+                }
+
                 if (mNodes.find(child) != mNodes.end()) {
-                  std::fprintf(fp, "%s-->|%u&rarr;%u|%s\n", namePtrTable[node_ptr].c_str(),
-                              outputIdx, inputIdx, namePtrTable[child].c_str());
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, inputIdx, child->type(), namePtrTable.at(child));
                 }
                 else if (verbose) {
-                  std::fprintf(fp, "%s-->|%u&rarr;%u|%p:::externalCls\n", namePtrTable[node_ptr].c_str(),
-                              outputIdx, inputIdx, static_cast<void*>(child.get()));
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, inputIdx, static_cast<void*>(child.get()));
                 }
                 break;
               }
@@ -122,32 +150,52 @@ void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers)
 
     size_t inputIdx = 0;
     for (auto input : mInputNodes) {
-      std::fprintf(fp, "input%lu((in#%lu)):::inputCls--->|&rarr;%u|%s\n", inputIdx, inputIdx,
-                  input.second, namePtrTable[input.first].c_str());
+      if (input.first != nullptr) {
+        fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|&rarr;{}|{}_{}\n", inputIdx, inputIdx,
+                    input.second, input.first->type(), namePtrTable.at(input.first));
+      }
+      else {
+        fmt::print(fp.get(), "input{}((in#{})):::inputCls\n", inputIdx, inputIdx);
+      }
       ++inputIdx;
     }
 
     size_t outputIdx = 0;
     for (auto output : mOutputNodes) {
-      std::fprintf(fp, "%s--->|%u&rarr;|output%lu((out#%lu)):::outputCls\n",
-                   namePtrTable[output.first].c_str(), output.second,
-                   outputIdx, outputIdx);
-      ++outputIdx;
-    }
-
-    std::fprintf(fp, "classDef inputCls fill:#afa\n");
-    std::fprintf(fp, "classDef outputCls fill:#ffa\n");
-    std::fprintf(fp, "classDef externalCls fill:#ccc\n");
-    std::fprintf(fp, "classDef rootCls stroke:#f00\n");
+      if (output.first != nullptr) {
+        // Add-on to display the operator's output dimensions
+        std::string dims = "";
+        const auto op = std::dynamic_pointer_cast<OperatorTensor>(output.first->getOperator());
+        if (op && op->getOutput(output.second) && !op->getOutput(output.second)->dims().empty()) {
+          dims += " " + fmt::format("{}", op->getOutput(output.second)->dims());
+        }
 
-    if (verbose) {
-      for (const auto &c : typeCounter) {
-        std::printf("%s - %zu\n", c.first.c_str(), c.second);
+        fmt::print(fp.get(), "{}_{}--->|\"{}{}&rarr;\"|output{}((out#{})):::outputCls\n",
+                    output.first->type(), namePtrTable.at(output.first), output.second,
+                    dims, outputIdx, outputIdx);
+      }
+      else {
+        fmt::print(fp.get(), "output{}((out#{})):::outputCls\n", outputIdx, outputIdx);
       }
+      ++outputIdx;
     }
 
-    std::fprintf(fp, "\n");
-    std::fclose(fp);
+    fmt::print(fp.get(), "classDef inputCls fill:#afa\n");
+    fmt::print(fp.get(), "classDef outputCls fill:#ffa\n");
+    fmt::print(fp.get(), "classDef externalCls fill:#ccc\n");
+    fmt::print(fp.get(), "classDef producerCls fill:#ccf\n");
+    fmt::print(fp.get(), "classDef genericCls fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n");
+    fmt::print(fp.get(), "classDef metaCls stroke-width:5px\n");
+    fmt::print(fp.get(), "classDef rootCls stroke:#f00\n");
+    fmt::print(fp.get(), "classDef producerCls_rootCls stroke:#f00,fill:#ccf\n");
+    fmt::print(fp.get(), "classDef genericCls_rootCls stroke:#f00,fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n");
+    fmt::print(fp.get(), "classDef metaCls_rootCls stroke:#f00,stroke-width:5px\n");
+    fmt::print(fp.get(), "\n");
+}
+
+void Aidge::GraphView::setRootNode(NodePtr node) {
+  AIDGE_ASSERT(mNodes.find(node) != mNodes.end(), "Root node is not in the GraphView!");
+  mRootNode = node;
 }
 
 ///////////////////////////////////////////////////////
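The rewritten `save()` also swaps the raw `FILE*` (which leaked on early return) for a `std::unique_ptr` whose deleter is `std::fclose`, so the Mermaid file is closed on every exit path, including the new error throw. The idiom in isolation, as a standalone sketch:

```cpp
#include <cstdio>
#include <memory>

int main() {
    // Deleter type deduced from &std::fclose; fp.get() yields the raw FILE*
    // required by C and fmt printing APIs. No deleter runs if fopen failed.
    auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(
        std::fopen("graph.mmd", "w"), &std::fclose);
    if (!fp) return 1;
    std::fputs("flowchart TB\n", fp.get());
    return 0;  // fclose runs automatically here
}
```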
@@ -155,29 +203,43 @@ void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers)
 ///////////////////////////////////////////////////////
 
 void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& inputs) {
-  AIDGE_ASSERT(inputs.size() <= mInputNodes.size(), "too many specified number of inputs");
-
+  size_t nbInputs = 0;
   std::vector<std::pair<NodePtr, IOIndex_t>> ignoredInputs(mInputNodes);
   for (auto input : inputs) {
-    auto it = std::find(ignoredInputs.begin(), ignoredInputs.end(), input);
-    AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input");
-    ignoredInputs.erase(it);
+    // Allow specifying dummy inputs (nullptr); they are only reflected
+    // in mInputNodes. All other functions (nbInputs(), inputs()) do not
+    // take them into account.
+    if (input.first != nullptr) {
+      auto it = std::find(ignoredInputs.begin(), ignoredInputs.end(), input);
+      AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input");
+      ignoredInputs.erase(it);
+      ++nbInputs;
+    }
   }
 
+  AIDGE_ASSERT(nbInputs <= mInputNodes.size(), "too many inputs specified");
+
   mInputNodes = inputs;
   mInputNodes.insert(mInputNodes.end(), ignoredInputs.begin(), ignoredInputs.end());
 }
 
 void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs) {
-  AIDGE_ASSERT(outputs.size() <= mOutputNodes.size(), "too many specified number of outputs");
-
+  size_t nbOutputs = 0;
   std::vector<std::pair<NodePtr, IOIndex_t>> ignoredOutputs(mOutputNodes);
   for (auto output : outputs) {
-    auto it = std::find(ignoredOutputs.begin(), ignoredOutputs.end(), output);
-    AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output");
-    ignoredOutputs.erase(it);
+    // Allow specifying dummy outputs (nullptr); they are only reflected
+    // in mOutputNodes. All other functions (nbOutputs(), outputs()) do not
+    // take them into account.
+    if (output.first != nullptr) {
+      auto it = std::find(ignoredOutputs.begin(), ignoredOutputs.end(), output);
+      AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output");
+      ignoredOutputs.erase(it);
+      ++nbOutputs;
+    }
   }
 
+  AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many outputs specified");
+
   mOutputNodes = outputs;
   mOutputNodes.insert(mOutputNodes.end(), ignoredOutputs.begin(), ignoredOutputs.end());
 }
@@ -248,7 +310,7 @@ Aidge::GraphView::inputs() const {
 
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
-Aidge::GraphView::inputs(std::string name) const {
+Aidge::GraphView::inputs(const std::string& name) const {
   return mNodeRegistry.at(name)->inputs();
 }
 
@@ -266,6 +328,8 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
 }
 
 void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
+    std::set<NodePtr> startNodes = inputNodes();
+
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
@@ -288,17 +352,24 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                         nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second));
                     }
                     else {
-                        assert(false && "Non-tensor entries not handled yet.\n");
+                        AIDGE_ASSERT(false, "Non-tensor entries not handled yet.\n");
                     }
                 }
             } else {
-                assert(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty());
+                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i)
+                    && !std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
+                  "Missing input #{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
             }
 
         }
+
+        if (nodePtr->type() == Producer_Op::Type) {
+          startNodes.insert(nodePtr);
+        }
     }
     // Compute dimensions of every node
-    _forwardDims(inputNodes());
+    _forwardDims(startNodes);
+
 }
 
 void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) {
@@ -314,7 +385,12 @@ void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) {
                 nextList.insert(nodePtr);
             } else { // compute output dimensions of children
                 std::set<std::shared_ptr<Node>> children = nodePtr->getChildren();
-                nextList.insert(children.begin(), children.end());
+                for (auto child : children) {
+                  const auto childOp = std::static_pointer_cast<OperatorTensor>(child->getOperator());
+                  if (!childOp->outputDimsForwarded()) {
+                    nextList.insert(child);
+                  }
+                }
             }
         }
     }
@@ -327,6 +403,10 @@ void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) {
             }
         }
     }
+
+    // Internal check to make sure we won't enter in an infinite loop!
+    AIDGE_ASSERT(nextList != listNodes, "Unable to forward dimensions (circular dependency and/or wrong dimensions?)");
+
     if (!nextList.empty()) {
         _forwardDims(nextList);
     }
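`_forwardDims()` now only re-enqueues children whose dims are not yet forwarded, and the new assertion aborts if a full pass leaves the pending set unchanged, which would otherwise recurse forever (for instance on a cycle whose dims never resolve). The same progress-check pattern in isolation, as a generic sketch:

```cpp
#include <set>
#include <stdexcept>
#include <utility>

// Sketch: drain a worklist, but abort if one pass makes no progress
// (the identical worklist would otherwise repeat forever).
template <typename T, typename Step>
void fixpoint(std::set<T> work, Step step) {
    while (!work.empty()) {
        std::set<T> next = step(work);  // items still unresolved after one pass
        if (next == work)
            throw std::runtime_error("no progress: circular dependency?");
        work = std::move(next);
    }
}
```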
@@ -357,12 +437,14 @@ Aidge::GraphView::outputs() const {
       // Keep only the nodes connected at this output position that are outside the GraphView
       std::vector<std::pair<std::shared_ptr<Node>, Aidge::IOIndex_t>> outsideOutputPos;
       for (const auto& output : outputPos) {
-        if (mNodes.find(output.first) == mNodes.end()) {
+        if (output.first == nullptr || mNodes.find(output.first) == mNodes.end()) {
           outsideOutputPos.push_back(output);
         }
       }
 
-      outsideOutputs.push_back(outsideOutputPos);
+      if (outputPos.empty() || !outsideOutputPos.empty()) {
+        outsideOutputs.push_back(outsideOutputPos);
+      }
     }
   }
   return outsideOutputs;
@@ -370,16 +452,18 @@ Aidge::GraphView::outputs() const {
 
 std::vector<
     std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>>
-Aidge::GraphView::outputs(std::string nodeName) const {
+Aidge::GraphView::outputs(const std::string& nodeName) const {
   return mNodeRegistry.at(nodeName)->outputs();
 }
 
 void Aidge::GraphView::setInputId(Aidge::IOIndex_t /*inID*/,
                                Aidge::IOIndex_t /*newNodeOutID*/) {
-  printf("Not implemented yet.\n");
+  fmt::print("Not implemented yet.\n");
 }
 
 void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam) {
+  AIDGE_ASSERT(node != nullptr, "Trying to add non-existent node!");
+
   // first node to be added to the graph is the root node by default
   if (mRootNode == nullptr) {
     mRootNode = node;
@@ -410,6 +494,59 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
   }
 }
 
+std::pair<std::vector<Aidge::NodePtr>, size_t> Aidge::GraphView::getRankedNodes() const {
+  std::set<NodePtr> nodesToRank(mNodes);
+  nodesToRank.erase(mRootNode);
+  std::vector<NodePtr> rankedNodes;
+  rankedNodes.push_back(mRootNode);
+
+  for (size_t curNodeIdx = 0; curNodeIdx < rankedNodes.size(); ++curNodeIdx) {
+    NodePtr curNode = rankedNodes[curNodeIdx];
+
+    for (auto childs : curNode->getOrderedChildren()) {
+      for (auto child : childs) {
+        if (child != nullptr && nodesToRank.find(child) != nodesToRank.end()) {
+          rankedNodes.push_back(child);
+          nodesToRank.erase(child);
+        }
+      }
+    }
+
+    for (auto parent : curNode->getParents()) {
+      if (parent != nullptr && nodesToRank.find(parent) != nodesToRank.end()) {
+        rankedNodes.push_back(parent);
+        nodesToRank.erase(parent);
+      }
+    }
+  }
+
+  const size_t orderUnicityLimit = rankedNodes.size();
+  if (!nodesToRank.empty()) {
+    rankedNodes.insert(rankedNodes.end(), nodesToRank.begin(), nodesToRank.end());
+  }
+
+  return std::make_pair(rankedNodes, orderUnicityLimit);
+}
+
+std::map<Aidge::NodePtr, std::string> Aidge::GraphView::getRankedNodesName(const std::string& format, bool markNonUnicity) const {
+  const auto rankedNodes = getRankedNodes();
+  std::map<NodePtr, std::string> rankedNodesName;
+  size_t rank = 0;
+  std::map<std::string, size_t> typeRank;
+  for (const auto& rankedNode : rankedNodes.first) {
+    std::map<std::string, size_t>::iterator it;
+    std::tie(it, std::ignore) = typeRank.insert(std::make_pair(rankedNode->type(), 0));
+
+    const auto name = (markNonUnicity && rank < rankedNodes.second)
+      ? fmt::format(format, rankedNode->name(), rankedNode->type(), rank, it->second)
+      : fmt::format(format, rankedNode->name(), rankedNode->type(), fmt::format("?{}", rank), fmt::format("?{}", it->second));
+    rankedNodesName.insert(std::make_pair(rankedNode, name));
+    ++it->second;
+    ++rank;
+  }
+  return rankedNodesName;
+}
+
 bool Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool includeLearnableParam) {
   if (otherNodes.empty()) {
     return true;
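`getRankedNodesName()` passes four positional arguments to `fmt::format`: `{0}` the node name, `{1}` its type, `{2}` its global rank and `{3}` its per-type rank, which is why `save()` above uses the format `"{3}"`. Ranks beyond the unicity limit are prefixed with `?`. A hedged usage sketch, assuming a populated `graph`:

```cpp
// Sketch: label each node "name (Type#k)", where k counts nodes of the same
// type in deterministic rank order; unused indices like {2} are allowed.
const auto labels = graph->getRankedNodesName("{0} ({1}#{3})");
for (const auto& kv : labels) {
    fmt::print("{}\n", kv.second);
}
```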
@@ -464,7 +601,7 @@ bool Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool incl
 
     for (auto childs : curNode->getOrderedChildren()) {
       for (auto child : childs) {
-        if (nodesToRank.find(child) != nodesToRank.end()) {
+        if (child != nullptr && nodesToRank.find(child) != nodesToRank.end()) {
           rankedNodes.push_back(child);
           nodesToRank.erase(child);
 
@@ -477,7 +614,7 @@ bool Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool incl
     }
 
     for (auto parent : curNode->getParents()) {
-      if (nodesToRank.find(parent) != nodesToRank.end()) {
+      if (parent != nullptr && nodesToRank.find(parent) != nodesToRank.end()) {
         rankedNodes.push_back(parent);
         nodesToRank.erase(parent);
 
@@ -578,7 +715,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents() const {
 std::vector<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents(const std::string nodeName) const {
   std::map<std::string, std::shared_ptr<Node>>::const_iterator it = mNodeRegistry.find(nodeName);
   if (it == mNodeRegistry.end()) {
-    printf("No such node a %s in %s graph.\n", nodeName.c_str(), name().c_str());
+    fmt::print("No such node a {} in {} graph.\n", nodeName, name());
     exit(-1);
   }
   return (it->second)->getParents();
@@ -607,8 +744,7 @@ Aidge::GraphView::getChildren(const std::string nodeName) const {
   std::map<std::string, std::shared_ptr<Node>>::const_iterator it =
       mNodeRegistry.find(nodeName);
   if (it == mNodeRegistry.end()) {
-    printf("No such node a %s in %s graph.\n", nodeName.c_str(),
-           name().c_str());
+    fmt::print("No such node a {} in {} graph.\n", nodeName, name());
     exit(-1);
   }
   return (it->second)->getOrderedChildren();
@@ -618,7 +754,7 @@ std::set<std::shared_ptr<Aidge::Node>>
 Aidge::GraphView::getChildren(const std::shared_ptr<Node> otherNode) const {
   std::set<std::shared_ptr<Node>>::const_iterator it = mNodes.find(otherNode);
   if (it == mNodes.end()) {
-    printf("No such node in graph.\n");
+    fmt::print("No such node in graph.\n");
     exit(-1);
   }
   return (*it)->getChildren();
@@ -632,7 +768,7 @@ Aidge::GraphView::getNode(const std::string& nodeName) const {
   if (it != mNodeRegistry.cend()) {
     return it->second;
   } else {
-    printf("No Node named %s in the current GraphView.\n", nodeName.c_str());
+    fmt::print("No Node named {} in the current GraphView.\n", nodeName);
     return nullptr;
   }
 }
@@ -681,13 +817,13 @@ void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnab
 
 
 bool Aidge::GraphView::swap(Node & /*node*/, Node & /*otherNode*/) {
-  printf("Swap() not implementated yet. Return false.\n");
+  fmt::print("Swap() not implementated yet. Return false.\n");
   return false;
 }
 
-void Aidge::GraphView::link(std::string /*name1_inID*/,
-                           std::string /*name2_outID*/) {
-  printf("Not implemented yet.\n");
+void Aidge::GraphView::link(const std::string& /*name1_inID*/,
+                           const std::string& /*name2_outID*/) {
+  fmt::print("Not implemented yet.\n");
 }
 
 void Aidge::GraphView::insertParent(NodePtr childNode,
@@ -708,18 +844,24 @@ void Aidge::GraphView::insertParent(NodePtr childNode,
 }
 
 bool Aidge::GraphView::replace(const std::set<Aidge::NodePtr>& oldNodes, const std::set<Aidge::NodePtr>& newNodes) {
-    // TODO: handle case where an oldNodes parameter does not come from a Producer but another Node (not included in oldNodes)
-    // How to distinguish it from data input?
-    // TODO: Parameter Tensors could be identified with their dimensions
-    // TODO: Take GraphView as input parameters since new Nodes should be connected whatever.
-    // It also avoids specifying each producer since they are automatically included
-
     // (1) create GraphViews from both sets of Nodes
     auto oldG = std::make_shared<GraphView>("oldG");
     oldG->add(oldNodes, false);
     auto newG = std::make_shared<GraphView>("newG");
     newG->add(newNodes, false);
 
+    return GraphView::replace(oldG, newG);
+}
+
+bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG) {
+    // TODO: handle case where an oldNodes parameter does not come from a Producer but another Node (not included in oldNodes)
+    // How to distinguish it from data input?
+    // TODO: Parameter Tensors could be identified with their dimensions
+    // TODO: Take GraphView as input parameters since new Nodes should be connected whatever.
+    // It also avoids specifying each producer since they are automatically included
+    const auto& oldNodes = oldG->getNodes();
+    const auto& newNodes = newG->getNodes();
+
     const auto oldOI = oldG->getOrderedInputs();
     const auto oldOO = oldG->getOrderedOutputs();
     const auto newOI = newG->getOrderedInputs();
@@ -1067,6 +1209,10 @@ void Aidge::GraphView::updateInputsOutputsDelete(std::shared_ptr<Node> deletedNo
       }
     }
   }
+
+  if (deletedNode == mRootNode) {
+    mRootNode = nullptr;
+  }
 }
 
 
@@ -1206,3 +1352,31 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
 
   return newGraph;
 }
+
+std::shared_ptr<Aidge::GraphView> Aidge::getConnectedGraphView(std::shared_ptr<Node> node) {
+  std::vector<NodePtr> foundNodes;
+  foundNodes.push_back(node);
+
+  for (size_t curNodeIdx = 0; curNodeIdx < foundNodes.size(); ++curNodeIdx) {
+    NodePtr curNode = foundNodes[curNodeIdx];
+
+    for (auto childs : curNode->getOrderedChildren()) {
+      for (auto child : childs) {
+        if (child != nullptr && std::find(foundNodes.begin(), foundNodes.end(), child) == foundNodes.end()) {
+          foundNodes.push_back(child);
+        }
+      }
+    }
+
+    for (auto parent : curNode->getParents()) {
+      if (parent != nullptr && std::find(foundNodes.begin(), foundNodes.end(), parent) == foundNodes.end()) {
+        foundNodes.push_back(parent);
+      }
+    }
+  }
+
+  auto graph = std::make_shared<GraphView>();
+  graph->add(node);
+  graph->add({foundNodes.cbegin(), foundNodes.cend()});
+  return graph;
+}
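The new free function walks children and parents transitively from a seed node and wraps everything reachable into a fresh `GraphView`, with the seed added first so it becomes the root. A hedged usage sketch, assuming a `NodePtr node` and the header's default arguments for `save()`:

```cpp
// Sketch: recover the whole connected component around `node`, e.g. after a
// network was wired with Node::addChild() but never put in a GraphView.
auto fullGraph = Aidge::getConnectedGraphView(node);
fullGraph->save("connected_graph");  // writes connected_graph.mmd
```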
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 6f0cc55159b1cc72b87bb34230376eb140b7ab8a..5d210144e2faa122416186734c52b67f1a0f8281 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -171,7 +171,7 @@ Aidge::IOIndex_t Aidge::Node::nbValidOutputs() const {
 void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId) {
     assert(inId != gk_IODefaultIndex && (inId < nbInputs()) && "Must be a valid index");
     if (mIdOutParents[inId] != gk_IODefaultIndex) {
-        std::printf("Warning: filling a Tensor already attributed\n");
+        fmt::print("Warning: filling a Tensor already attributed\n");
         auto originalParent = input(inId);
         // remove original parent reference to child
         // find the output ID for original Parent
@@ -187,10 +187,14 @@ void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId)
 
 void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t outId,
                              const IOIndex_t otherInId) {
-    assert((otherInId < otherNode->nbInputs()) && "Input index out of bound.");
-    assert((outId < nbOutputs()) && "Output index out of bound.");
+    AIDGE_ASSERT(otherInId < otherNode->nbInputs(),
+        "Input index (#{}) of the node {} (of type {}) is out of bound (it has {} inputs), when trying to add it as a child of node {} (of type {})",
+        otherInId, otherNode->name(), otherNode->type(), otherNode->nbInputs(), name(), type());
+    AIDGE_ASSERT(outId < nbOutputs(),
+        "Output index (#{}) of the node {} (of type {}) is out of bound (it has {} outputs), when trying to add the child node {} (of type {})",
+        outId, name(), type(), nbOutputs(), otherNode->name(), otherNode->type());
     if (otherNode->input(otherInId).second != gk_IODefaultIndex) {
-        std::printf("Warning, the %d-th Parent of the child node already existed.\n", otherInId);
+        fmt::print("Warning, the {}-th Parent of the child node already existed.\n", otherInId);
     }
     // manage tensors and potential previous parent
     otherNode->setInputId(otherInId, outId);
@@ -203,18 +207,11 @@ void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t ou
 
 void Aidge::Node::addChildView(std::shared_ptr<GraphView> otherGraph, const IOIndex_t outId,
                                std::pair<std::shared_ptr<Node>, IOIndex_t> otherInId) {
-    assert((otherInId.second < otherInId.first->nbInputs()) &&
-           "Other graph input index out of bound.");
-    assert((outId < nbOutputs()) && "Output index out of bound.");
-    std::set<std::shared_ptr<Node>> inNodes = otherGraph->inputNodes();
-    if (inNodes.size() == std::size_t(0)) {  // no input Node
-        printf("Cannot add GraphView to the Node. No input node detected.\n");
-    } else  // inNodes.size() >= 1
-    {
-        assert((inNodes.find(otherInId.first) !=
-                inNodes.end()));  // assert it really is an input node
-        addChildOp(otherInId.first, outId, otherInId.second);
-    }
+    const auto inNodes = otherGraph->inputNodes();
+    AIDGE_ASSERT(otherInId.first != nullptr && inNodes.find(otherInId.first) != inNodes.end(),
+        "Node {} (of type {}) is not a valid input node of GraphView {}, when trying to add it as a child of node {} (of type {})",
+        (otherInId.first) ? otherInId.first->name() : "#nullptr", (otherInId.first) ? otherInId.first->type() : "", otherGraph->name(), name(), type());
+    addChildOp(otherInId.first, outId, otherInId.second);
 }
 
 void Aidge::Node::addChild(std::shared_ptr<Node> otherNode, const IOIndex_t outId,
@@ -229,9 +226,9 @@ void Aidge::Node::addChild(std::shared_ptr<Node> otherNode, const IOIndex_t outI
 void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t outId,
                            std::pair<std::shared_ptr<Node>, IOIndex_t> otherInId) {
     if (!otherInId.first) {
-        assert((otherView->inputNodes().size() == 1U) &&
-               "Specify an input Node for the GraphView. More or less than one "
-               "Node is not explicit.");
+        AIDGE_ASSERT(otherView->inputNodes().size() == 1U,
+            "Input node of GraphView {} need to be specified, because it has more than one input ({} inputs), when trying to add it as a child of node {} (of type {})",
+            otherView->name(), otherView->inputNodes().size(), name(), type());
         otherInId.first = *(otherView->inputNodes().begin());
     }
     otherInId.second = (otherInId.second != gk_IODefaultIndex)
@@ -242,7 +239,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
 
 void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
     if (getParent(inId) != nullptr) {
-        printf("Warning, you're replacing a Parent.\n");
+        fmt::print("Warning, you're replacing a Parent.\n");
     }
     assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
     mParents[inId] = other_node;
@@ -288,8 +285,7 @@ std::vector<std::vector<std::shared_ptr<Aidge::Node>>> Aidge::Node::getOrderedCh
 
 std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren(const IOIndex_t outId) const {
     assert((outId < nbOutputs()) && "Output index out of bound.");
-    std::vector<std::shared_ptr<Node>> children =
-            std::vector<std::shared_ptr<Node>>(mChildren[outId].size());
+    std::vector<std::shared_ptr<Node>> children;
     for (std::size_t i = 0; i < mChildren[outId].size(); ++i) {
         children.push_back(mChildren[outId][i].lock());
     }
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index 192036651cfbe2df71139dd63ca3d71f07300964..5556f4ff5c87d1adc23f5bff1aaf90c230de06cc 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -15,3 +15,7 @@
 
 const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity
     = [](const std::vector<std::vector<size_t>>& inputsDims) { return inputsDims; };
+
+const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs) {
+    return [nbOutputs, inputIdx](const std::vector<std::vector<size_t>>& inputsDims) { return std::vector<std::vector<size_t>>(nbOutputs, inputsDims[inputIdx]); };
+}
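`InputIdentity(i, n)` returns a `ComputeDimsFunc` that replicates the dims of input `i` across all `n` outputs, complementing the existing `Identity` which forwards every input's dims one-to-one. What the returned lambda computes, on concrete values:

```cpp
// Sketch: broadcast input #1's dims to both outputs.
const auto func = Aidge::GenericOperator_Op::InputIdentity(1, 2);
const std::vector<std::vector<std::size_t>> inDims = {{4, 3}, {8, 16}};
const auto outDims = func(inDims);  // {{8, 16}, {8, 16}}
```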
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6e34c1a2005f551c255e9b7441e853015354337f
--- /dev/null
+++ b/src/operator/Memorize.cpp
@@ -0,0 +1,56 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Memorize.hpp"
+
+const std::string Aidge::Memorize_Op::Type = "Memorize";
+
+void Aidge::Memorize_Op::computeOutputDims() {
+    for (size_t i = 0; i < 2; ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        }
+    }
+
+    // Only require one of the inputs to have its dims defined
+    // Otherwise, forwardDims() won't converge!
+    if (!(getInput(0)->empty())) {
+        const auto expectedDims =  getInput(0)->dims();
+        mOutputs[0]->resize(expectedDims);
+    }
+    else if (!(getInput(1)->empty())) {
+        const auto expectedDims =  getInput(1)->dims();
+        mOutputs[0]->resize(expectedDims);
+    }
+}
+
+bool Aidge::Memorize_Op::outputDimsForwarded() const {
+    // Only check the output dims
+    bool forwarded = true;
+    // check outputs have been filled
+    for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+        forwarded &= !(getOutput(i)->empty());
+    }
+    return forwarded;
+}
+
+void Aidge::Memorize_Op::updateConsummerProducer() {
+    Operator::updateConsummerProducer();
+    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
+    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+}
+
+void Aidge::Memorize_Op::forward() {
+    Operator::forward();
+    ++this->template getAttr<MemorizeAttr::ForwardStep>();
+    this->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
+}
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 530357085a16ca3e834669cebd2d26882ca8ddab..883185021b395b42e5c47ef0461ebc0614f14456 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -13,10 +13,10 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph)
-    : OperatorTensor(type, graph->dataInputs().size(), (graph->inputs().size() - graph->dataInputs().size()), graph->outputs().size()),
+    : OperatorTensor(type, graph->dataInputs().size(), (graph->getOrderedInputs().size() - graph->dataInputs().size()), graph->getOrderedOutputs().size()),
         mGraph(graph)
 {
-    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->inputs().size());
+    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedInputs().size());
     for (std::size_t i = 0; i < mInputs.size(); ++i) {
         mInputs[i] = std::make_shared<Tensor>();
     }
@@ -24,7 +24,9 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<
     mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedOutputs().size());
     for (size_t outputIdx = 0; outputIdx < mOutputs.size(); ++outputIdx) {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
-        mOutputs[outputIdx] = std::dynamic_pointer_cast<Tensor>(outputOp.first->getOperator()->getRawOutput(outputOp.second));
+        if (outputOp.first) {
+            mOutputs[outputIdx] = std::dynamic_pointer_cast<Tensor>(outputOp.first->getOperator()->getRawOutput(outputOp.second));
+        }
     }
 }
 
@@ -34,7 +36,42 @@ Aidge::NbElts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputI
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
-        return inputOp.first->getOperator()->getNbRequiredData(inputOp.second);
+        if (inputOp.first) {
+            return inputOp.first->getOperator()->getNbRequiredData(inputOp.second);
+        }
+        else {
+            return 0;
+        }
+    }
+}
+
+Aidge::NbElts_t Aidge::MetaOperator_Op::getNbRequiredProtected(const IOIndex_t inputIdx) const {
+    if (mImpl) {
+        return mImpl->getNbRequiredProtected(inputIdx);
+    }
+    else {
+        const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+        if (inputOp.first) {
+            return inputOp.first->getOperator()->getNbRequiredProtected(inputOp.second);
+        }
+        else {
+            return 0;
+        }
+    }
+}
+
+Aidge::NbElts_t Aidge::MetaOperator_Op::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
+    if (mImpl) {
+        return mImpl->getRequiredMemory(outputIdx, inputsSize);
+    }
+    else {
+        const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
+        if (outputOp.first) {
+            return outputOp.first->getOperator()->getRequiredMemory(outputOp.second, inputsSize);
+        }
+        else {
+            return 0;
+        }
     }
 }
 
@@ -44,7 +81,12 @@ Aidge::NbElts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) co
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
-        return inputOp.first->getOperator()->getNbConsumedData(inputOp.second);
+        if (inputOp.first) {
+            return inputOp.first->getOperator()->getNbConsumedData(inputOp.second);
+        }
+        else {
+            return 0;
+        }
     }
 }
 
@@ -54,7 +96,12 @@ Aidge::NbElts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) c
     }
     else {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
-        return outputOp.first->getOperator()->getNbProducedData(outputOp.second);
+        if (outputOp.first) {
+            return outputOp.first->getOperator()->getNbProducedData(outputOp.second);
+        }
+        else {
+            return 0;
+        }
     }
 }
 
@@ -65,10 +112,9 @@ void Aidge::MetaOperator_Op::updateConsummerProducer() {
     else {
         if (!mScheduler) {
             // Lazy initialization
-            mScheduler = std::make_shared<SequentialScheduler>(mGraph);
+            mScheduler = std::make_shared<SequentialScheduler>(mGraph, mUpperNode.lock());
         }
 
-
         // TODO: check that generateScheduling() can be called multiple time to iteratively update the schedule.
         // It could be a good idea to unify updateConsummerProducer() and generateScheduling() into a "updateScheduling()"
         mScheduler->generateScheduling();
@@ -86,7 +132,7 @@ void Aidge::MetaOperator_Op::forward() {
             // Lazy initialization
             // TODO: should we assert that a scheduler already exists at this point?
             // => should be created in updateConsummerProducer()
-            mScheduler = std::make_shared<SequentialScheduler>(mGraph);
+            mScheduler = std::make_shared<SequentialScheduler>(mGraph, mUpperNode.lock());
             mScheduler->generateScheduling();
         }
 
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index e6d8b017c0c3e5effb43dd789b569f283154e80d..d4a594e95b2695b496fc28b8e8a7fcf3442e9253 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -50,4 +50,7 @@ void Aidge::Mul_Op::computeOutputDims() {
         }
         mOutputs[0]->resize(outDims);
     }
+    else if (!getInput(0)->empty() && !getInput(1)->empty()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims());
+    }
 }
\ No newline at end of file
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index 4adc57f55f7531c28c0c0603ee01c176bdd59e96..289b2be90735d848e5083090d2ae4319a7490fde 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -32,19 +32,37 @@ Aidge::Operator::~Operator() noexcept = default;
 ///////////////////////////////////////////////////////
 
 Aidge::NbElts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredData(): an implementation is required for {}!", type());
     return mImpl->getNbRequiredData(inputIdx);
 }
 
+Aidge::NbElts_t Aidge::Operator::getNbRequiredProtected(const Aidge::IOIndex_t inputIdx) const {
+    AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredProtected(): an implementation is required for {}!", type());
+    return mImpl->getNbRequiredProtected(inputIdx);
+}
+
+Aidge::NbElts_t Aidge::Operator::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
+    AIDGE_ASSERT(mImpl != nullptr, "getRequiredMemory(): an implementation is required for {}!", type());
+    return mImpl->getRequiredMemory(outputIdx, inputsSize);
+}
+
 Aidge::NbElts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
+    AIDGE_ASSERT(mImpl != nullptr, "getNbConsumedData(): an implementation is required for {}!", type());
     return mImpl->getNbConsumedData(inputIdx);
 }
 
 Aidge::NbElts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
+    AIDGE_ASSERT(mImpl != nullptr, "getNbProducedData(): an implementation is required for {}!", type());
     return mImpl->getNbProducedData(outputIdx);
 }
 void Aidge::Operator::updateConsummerProducer(){
+    AIDGE_ASSERT(mImpl != nullptr, "updateConsummerProducer(): an implementation is required for {}!", type());
     mImpl->updateConsummerProducer();
 }
+void Aidge::Operator::resetConsummerProducer(){
+    AIDGE_ASSERT(mImpl != nullptr, "resetConsummerProducer(): an implementation is required for {}!", type());
+    mImpl->resetConsummerProducer();
+}
 
 void Aidge::Operator::runHooks() const {
     for (auto& hook : mHooks) {
@@ -52,12 +70,9 @@ void Aidge::Operator::runHooks() const {
     }
 }
 void Aidge::Operator::forward() {
-    if(mImpl) {
-        mImpl->forward();
-        runHooks();
-    } else {
-        printf("forward: No implementation is linked.\n");
-    }
+    AIDGE_ASSERT(mImpl != nullptr, "forward(): an implementation is required for {}!", type());
+    mImpl->forward();
+    runHooks();
 }
 
 void Aidge::Operator::backward() { mImpl->backward(); }
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 72a71814b1463395443c6a4504f2eef660ec1185..c0ada265410f9bc46aab3b43fae270f1e74dd5eb 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -20,19 +20,13 @@
 
 
 void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
-    if (inputIdx >= nbInputs()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu inputs", type().c_str(), nbInputs());
-    }
-    if (strcmp((data)->type(), Tensor::Type) != 0) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input data must be of Tensor type");
-    }
+    AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs());
+    AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type");
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
 }
 
 void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
-    if (strcmp(data->type(), "Tensor") != 0) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
-    }
+    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     if (getInput(inputIdx)) {
         *mInputs[inputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
     } else {
@@ -43,9 +37,7 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std:
 Aidge::OperatorTensor::~OperatorTensor() = default;
 
 void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Aidge::Data>&& data) {
-    if (strcmp(data->type(), "Tensor") != 0) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
-    }
+    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     if (getInput(inputIdx)) {
         *mInputs[inputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
     } else {
@@ -54,36 +46,24 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::share
 }
 
 const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidge::IOIndex_t inputIdx) const {
-    if (inputIdx >= nbInputs()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu inputs", type().c_str(), nbInputs());
-    }
+    AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs());
     return mInputs[inputIdx];
 }
 
 void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) {
-    if (strcmp(data->type(), "Tensor") != 0) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
-    }
-    if (outputIdx >= nbOutputs()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbOutputs());
-    }
+    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
+    AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
     *mOutputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
 }
 
 void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) {
-    if (strcmp(data->type(), "Tensor") != 0) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator only accepts Tensors as inputs", type().c_str());
-    }
-    if (outputIdx >= nbOutputs()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbOutputs());
-    }
+    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
+    AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
     *mOutputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
 }
 
 const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const {
-    if (outputIdx >= nbOutputs()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "%s Operator has %hu outputs", type().c_str(), nbOutputs());
-    }
+    AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
     return mOutputs[outputIdx];
 }
 
@@ -105,7 +85,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     }
     for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
         if (((outputDims[i] + firstEltDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
         }
     }
     // return the same Tensor description as given in function parameter for each data input
@@ -117,7 +97,7 @@ void Aidge::OperatorTensor::computeOutputDims() {
     bool associated = (nbInputs() > 0); // do not compute anything if no input
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
         if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
         }
         associated &= !(getInput(i)->empty());
     }
@@ -125,7 +105,9 @@ void Aidge::OperatorTensor::computeOutputDims() {
         const auto expectedDims =  getInput(0)->dims();
         for (std::size_t i = 1; i < nbInputs(); ++i) {
             if (expectedDims != getInput(i)->dims()) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator's inputs should have the same dimensions");
+                AIDGE_THROW_OR_ABORT(std::runtime_error,
+                    "{} operator's inputs should have the same dimensions: expected {} (input #0), given {} (input #{})",
+                    type(), expectedDims, getInput(i)->dims(), i);
             }
         }
         mOutputs[0]->resize(expectedDims);
@@ -139,7 +121,9 @@ bool Aidge::OperatorTensor::outputDimsForwarded() const {
         forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
-        forwarded &= !(getOutput(i)->empty());
+        // If getOutput(i) is nullptr, ignore this output (it may be a dummy
+        // output in a MetaOperator)
+        forwarded &= (getOutput(i)) ? !(getOutput(i)->empty()) : true;
     }
     return forwarded;
 }
@@ -150,6 +134,7 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
     }
 
     for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
+        AIDGE_ASSERT(getInput(i) != nullptr, "Missing input #{} for operator {}", i, type());
         getInput(i)->setDataType(dataType);
     }
 }
\ No newline at end of file
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3dd65eb4d34266f6e419bdc86362b8da4a55fdf0
--- /dev/null
+++ b/src/operator/Pop.cpp
@@ -0,0 +1,38 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <string>
+
+#include "aidge/operator/Pop.hpp"
+
+const std::string Aidge::Pop_Op::Type = "Pop";
+
+void Aidge::Pop_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
+    }
+    if (!(getInput(0)->empty())) {
+        auto inputDims = getInput(0)->dims();
+        inputDims.erase(inputDims.begin());
+        getOutput(0)->resize(inputDims);
+    }
+}
+
+void Aidge::Pop_Op::updateConsummerProducer() {
+    Operator::updateConsummerProducer();
+    this->template getAttr<PopAttr::ForwardStep>() = 0;
+}
+
+void Aidge::Pop_Op::forward() {
+    Operator::forward();
+    ++this->template getAttr<PopAttr::ForwardStep>();
+}
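`Pop` strips the leading (stacking) dimension from its input and counts forward steps, so a scheduler can feed one slice per step. For an input of dims `{T, N, C}` the output has dims `{N, C}`; the dimension computation alone, on concrete values:

```cpp
// Sketch of Pop_Op::computeOutputDims() on concrete dims:
std::vector<std::size_t> dims = {10, 32, 64};  // {T, N, C}
dims.erase(dims.begin());                      // drop the stacking axis
// dims is now {32, 64}, the per-step output shape
```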
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..48ed5f8286712c94bcf87f3234e70080652ab141
--- /dev/null
+++ b/src/operator/Sigmoid.cpp
@@ -0,0 +1,16 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <string>
+
+#include "aidge/operator/Sigmoid.hpp"
+
+const std::string Aidge::Sigmoid_Op::Type = "Sigmoid";
\ No newline at end of file
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 11d91a1fcd4c1d4ee6bcc5f9d830870fa6e732e5..6d2670695b2ffe9acbf09edd3e82f8549a4184f0 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -27,7 +27,7 @@ const std::string Aidge::Slice_Op::Type = "Slice";
 void Aidge::Slice_Op::computeOutputDims() {
     // check input have been associated
     if (!getInput(0) || (getInput(0)->empty())) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
     }
 
     const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..de55a6d6c69df5706b945ef9f56027f7a09ce8d7
--- /dev/null
+++ b/src/operator/Tanh.cpp
@@ -0,0 +1,16 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <string>
+
+#include "aidge/operator/Tanh.hpp"
+
+const std::string Aidge::Tanh_Op::Type = "Tanh";
\ No newline at end of file
diff --git a/src/recipes/ExpandMetaOps.cpp b/src/recipes/ExpandMetaOps.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..16f0b4c52f394e32e24fa49951c39a7c2cb35162
--- /dev/null
+++ b/src/recipes/ExpandMetaOps.cpp
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+
+void Aidge::expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive) {
+    bool found = false;
+    const auto nodes = graph->getNodes();
+    for (auto node : nodes) {
+        auto metaOp = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator());
+
+        if (metaOp != nullptr) {
+            // Replace meta op by its micro-graph
+            // graph will be updated accordingly in GraphView::replace()
+            auto g = std::make_shared<GraphView>();
+            g->add(node, false);
+            GraphView::replace(g, metaOp->getMicroGraph());
+            found = true;
+        }
+    }
+
+    if (found && recursive) {
+        expandMetaOps(graph, true);
+    }
+}
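+
+// Usage sketch (illustrative; the graph construction is assumed elsewhere):
+//   std::shared_ptr<GraphView> graph = ...; // e.g. contains PaddedConv meta-operators
+//   expandMetaOps(graph, true); // expand recursively until no meta-operator remains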
diff --git a/src/recipies/ExplicitCastMove.cpp b/src/recipes/ExplicitCastMove.cpp
similarity index 95%
rename from src/recipies/ExplicitCastMove.cpp
rename to src/recipes/ExplicitCastMove.cpp
index 5651f2ba4cc939678ab306137464c52caa1db46c..7d836c3acc835c5ed3fe014db6787029dc318afd 100644
--- a/src/recipies/ExplicitCastMove.cpp
+++ b/src/recipes/ExplicitCastMove.cpp
@@ -9,7 +9,7 @@
  *
  ********************************************************************************/
 
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Cast.hpp"
 #include "aidge/operator/Move.hpp"
@@ -20,6 +20,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
     for (auto node : nodes) {
         // TODO: currently, Operator data type is only reflected in its output tensor data type.
         // But an Operator might have multiple outputs of different data type(?)
+        AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
         const auto& output = std::static_pointer_cast<OperatorTensor>(node->getOperator())->getOutput(0);
         if (output->getImpl() == nullptr) {
             continue;
@@ -32,6 +33,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
             const auto parent = node->inputs()[0];
             // Check parent is not nullptr, as this Operator may be an entry point of the graph without parent
             if (parent.first != nullptr) {
+                AIDGE_ASSERT(parent.first->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
                 const auto& input = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getOutput(parent.second);
 
                 if ((node->type() == Cast_Op::Type && input->dataType() == output->dataType())
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
similarity index 94%
rename from src/recipies/FuseBatchNorm.cpp
rename to src/recipes/FuseBatchNorm.cpp
index 2fb017567550ada083d0d79d0323b0b45998026f..ac1fc8d7922827217d31385395666db53c401306 100644
--- a/src/recipies/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -21,7 +21,7 @@
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/MetaOperator.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
@@ -53,6 +53,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     DimSize_t convNbOutChannels;
     DimSize_t channelsSize;
     std::array<DimSize_t, 2> kernelDims;
+    AIDGE_ASSERT(convNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
     std::shared_ptr<OperatorTensor> convOp = std::static_pointer_cast<OperatorTensor>(convNode->getOperator());
     if (convNode->type() == Conv_Op<2>::Type) {
         const std::shared_ptr<Conv_Op<2>> convOpPtr =
@@ -89,13 +90,13 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             meanVariance += b_var.get<float>(outChId);
             ++count;
         } else {
-            printf("Zero-variance: %s [%lu]\n", convNode->name().c_str(), outChId);
+            fmt::print("Zero-variance: {} [{}]\n", convNode->name(), outChId);
         }
     }
     if (count > 0)
         meanVariance /= count;
     else {
-        printf("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+        fmt::print("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
     std::shared_ptr<Tensor> weightBuf, biasBuf;
@@ -172,7 +173,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::MatchSolution> solution) {
 void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
     std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
     regex->setNodeKey("BatchNorm", "getType($) =='BatchNorm'");
-    printf("\n============================\nSearching for solutions\n==============================\n");
+    fmt::print("\n============================\nSearching for solutions\n============================\n");
     regex->setNodeKey(
             "OP",
             "getType($) =='Conv' || getType($) =='ConvDepthWise' || getType($) =='PaddedConv' || getType($) =='PaddedConvDepthWise'");
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
similarity index 62%
rename from src/recipies/FuseMulAdd.cpp
rename to src/recipes/FuseMulAdd.cpp
index 85bfc408f092d9f234265db51a01eff1ab64005b..f408959a13d007853c24e30c1ef683648cf9c200 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -15,7 +15,7 @@
 #include <string>
 
 #include "aidge/operator/FC.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -36,15 +36,44 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
 
     // Step 1 : Create FC
     // Fetch the output dimension throught the bias size
-    std::shared_ptr<Node> bias = (addNode->getParent(1)) ? addNode->getParent(1)->cloneSharedOperators() : nullptr;
+    std::shared_ptr<Node> bias = nullptr;
+    if (addNode->getParent(0) == matmulNode) {
+        AIDGE_ASSERT(addNode->getParent(1), "No bias detected to produce the fuseMulAdd recipe.");
+        bias = addNode->getParent(1)->cloneSharedOperators();
+    }
+    else if (addNode->getParent(1) == matmulNode) {
+        AIDGE_ASSERT(addNode->getParent(0), "No bias detected to produce the fuseMulAdd recipe.");
+        bias = addNode->getParent(0)->cloneSharedOperators();
+    }
 
-    AIDGE_ASSERT(matmulNode->getParent(1), "No weight detected to produce the fuseMulAdd recipe.");
+    std::shared_ptr<Node> weight = nullptr;
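+    // Deduce which MatMul input holds the weights:
+    // - a single connected input is necessarily the weight;
+    // - otherwise, the input connected to a Producer (when the other one is
+    //   not) is the weight;
+    // - if both inputs are Producers, input #1 is assumed to be the weight.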
+    if ((matmulNode->getParent(1) && !matmulNode->getParent(0))
+        || (matmulNode->getParent(1) && matmulNode->getParent(1)->getOperator()->type() == Producer_Op::Type
+            && matmulNode->getParent(0) && matmulNode->getParent(0)->getOperator()->type() != Producer_Op::Type))
+    {
+        weight = matmulNode->getParent(1)->cloneSharedOperators();
+    }
+    else if ((matmulNode->getParent(0) && !matmulNode->getParent(1))
+        || (matmulNode->getParent(0) && matmulNode->getParent(0)->getOperator()->type() == Producer_Op::Type
+            && matmulNode->getParent(1) && matmulNode->getParent(1)->getOperator()->type() != Producer_Op::Type))
+    {
+        weight = matmulNode->getParent(0)->cloneSharedOperators();
+    }
+    else if (matmulNode->getParent(0) && matmulNode->getParent(0)->getOperator()->type() == Producer_Op::Type
+        && matmulNode->getParent(1) && matmulNode->getParent(1)->getOperator()->type() == Producer_Op::Type)
+    {
+        // If both inputs are Producers, there is an ambiguity, but both options
+        // result in a correct solution.
+        fmt::print("Warning: both MatMul inputs are Producers, assuming data at input #0 and weights at input #1.\n");
+        weight = matmulNode->getParent(1)->cloneSharedOperators();
+    }
+    AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
 
-    std::shared_ptr<Node> weight = matmulNode->getParent(1)->cloneSharedOperators();
     // TODO: find another way to get OutChannels for FC operator.
     // This poor fix supposes that one of Add inputs is a const and has the same outChannels as the output
     DimSize_t outSize = 0;
-    const auto& op = std::dynamic_pointer_cast<OperatorTensor>(addNode->getOperator());
+    AIDGE_ASSERT(addNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+    const auto& op = std::static_pointer_cast<OperatorTensor>(addNode->getOperator());
     for (size_t i = 0; i < op->nbInputs(); i++)
     {
         const auto& inTensor = op->getInput(i);
diff --git a/src/recipies/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
similarity index 95%
rename from src/recipies/HorizontalTiling.cpp
rename to src/recipes/HorizontalTiling.cpp
index 7d3fafc0a15d1b797fdfb1a2884b62d2d8d766c5..8e27fea58014b4ec16729f3593dd656026e16826 100644
--- a/src/recipies/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -15,7 +15,7 @@
 #include <vector>
 #include <utility>
 
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
@@ -36,7 +36,8 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     if (node->getOperator()->type() != "Conv") {
         AIDGE_INTERNAL_ASSERT("Operator should be a Convolution.");
     }
-    const auto& op = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
+    AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+    const auto& op = std::static_pointer_cast<OperatorTensor>(node->getOperator());
     if (op->nbOutputs() != 1 || op->nbData() > 1) {
         AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
     }
diff --git a/src/recipies/LabelGraph.cpp b/src/recipes/LabelGraph.cpp
similarity index 98%
rename from src/recipies/LabelGraph.cpp
rename to src/recipes/LabelGraph.cpp
index 6966bb81d000b62d904f800233048fa58998c6fb..ac0e6bfe197460c8c422a6c1f3b3240518ee1f29 100644
--- a/src/recipies/LabelGraph.cpp
+++ b/src/recipes/LabelGraph.cpp
@@ -11,7 +11,7 @@
 
 #include <memory>
 
-#include "aidge/recipies/LabelGraph.hpp"
+#include "aidge/recipes/LabelGraph.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/AvgPooling.hpp"
diff --git a/src/recipies/RemoveDropout.cpp b/src/recipes/RemoveDropout.cpp
similarity index 97%
rename from src/recipies/RemoveDropout.cpp
rename to src/recipes/RemoveDropout.cpp
index 1dedac8f19e6ec6b4b1f6dabb6bd3e9b8c759def..d141f5d3a74e42f8f0fc5465fda043f91f37d5bc 100644
--- a/src/recipies/RemoveDropout.cpp
+++ b/src/recipes/RemoveDropout.cpp
@@ -14,7 +14,7 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 //Graph Regex
 #include "aidge/graphRegex/GraphRegex.hpp"
diff --git a/src/recipies/RemoveFlatten.cpp b/src/recipes/RemoveFlatten.cpp
similarity index 97%
rename from src/recipies/RemoveFlatten.cpp
rename to src/recipes/RemoveFlatten.cpp
index d571b53023b7665c25aedc869628045b3b13d509..c28c4794e9611cdedd0bd8c76a1e6d7580dc17b6 100644
--- a/src/recipies/RemoveFlatten.cpp
+++ b/src/recipes/RemoveFlatten.cpp
@@ -13,7 +13,7 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 
 //Graph Regex
diff --git a/src/scheduler/MemoryManager.cpp b/src/scheduler/MemoryManager.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9599dbf74f4b1044534b94014e16cebe5731c503
--- /dev/null
+++ b/src/scheduler/MemoryManager.cpp
@@ -0,0 +1,914 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <fmt/format.h>
+
+#include "aidge/scheduler/MemoryManager.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+std::shared_ptr<Aidge::MemoryManager::MemorySpace> Aidge::MemoryManager::reserve(
+    unsigned int size,
+    const std::set<std::shared_ptr<Node> >& dependencies)
+{
+    const unsigned int offset = onStack(size);
+
+    std::shared_ptr<MemorySpace> memSpace
+        = std::make_shared<MemorySpace>(mClock, offset, size, dependencies);
+    mMemSpaces.push_back(memSpace);
+    return memSpace;
+}
+
+void Aidge::MemoryManager::expand(
+    std::shared_ptr<MemorySpace> memSpace,
+    unsigned int requiredSize)
+{
+    assert(std::find(mMemSpaces.begin(), mMemSpaces.end(), memSpace)
+            != mMemSpaces.end());
+
+    memSpace->size = std::max(memSpace->size, requiredSize);
+
+    // Rebuild the stack from the beginning, taking into account the new size.
+    // Everything else stays the same.
+    mMemStack.clear();
+
+    for (Clock_T clock = 0; clock <= mClock; ++clock) {
+        for (std::vector<std::shared_ptr<MemorySpace> >::iterator
+            it = mMemSpaces.begin(), itEnd = mMemSpaces.end(); it != itEnd;
+            ++it)
+        {
+            if ((*it)->allocated == clock)
+                (*it)->offset = onStack((*it)->size);
+        }
+
+        // MemorySpaces released at this clock are still valid until the next
+        // tick; make sure offStack() is only applied after all onStack() calls
+        // are done.
+        for (std::vector<std::shared_ptr<MemorySpace> >::iterator
+            it = mMemSpaces.begin(), itEnd = mMemSpaces.end(); it != itEnd;
+            ++it)
+        {
+            if ((*it)->released == clock && (*it)->dependencies.empty())
+                offStack((*it)->offset);
+        }
+    }
+}
+
+Aidge::MemoryManager::MemoryPlane Aidge::MemoryManager::allocate(
+    unsigned int size,
+    const std::set<std::shared_ptr<Node> >& dependencies,
+    unsigned int stride,
+    unsigned int length,
+    unsigned int count)
+{
+    const unsigned int fullSize = std::max(size, stride) * length * count;
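+    // Example (illustrative): size=100, stride=128, length=4, count=2
+    // => fullSize = max(100, 128) * 4 * 2 = 1024 words reserved.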
+    return MemoryPlane(reserve(fullSize, dependencies),
+                       mClock, 0, size, stride, length, count);
+}
+
+unsigned int Aidge::MemoryManager::allocate(
+    const std::shared_ptr<Node>& node,
+    unsigned int size,
+    const std::set<std::shared_ptr<Node> >& dependencies,
+    unsigned int stride,
+    unsigned int length,
+    unsigned int count)
+{
+    std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >::iterator it;
+    std::tie(it, std::ignore) = mMemPlanes.insert(std::make_pair(node,
+                                                std::vector<MemoryPlane>()));
+
+    (*it).second.push_back(allocate(size, dependencies, stride, length, count));
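+    // Return the index of the plane just added for this node; planes are only
+    // ever appended, so previously returned indices remain valid.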
+    return ((*it).second.size() - 1);
+}
+
+bool Aidge::MemoryManager::isWrapAround(
+    std::shared_ptr<MemorySpace> memSpace,
+    unsigned int offset,
+    unsigned int size,
+    unsigned int stride,
+    unsigned int length,
+    unsigned int count) const
+{
+    const unsigned int fullSize = std::max(size, stride) * length * count;
+    return (offset + fullSize > memSpace->size);
+}
+
+Aidge::MemoryManager::MemoryPlane Aidge::MemoryManager::reallocate(
+    std::shared_ptr<MemorySpace> memSpace,
+    unsigned int offset,
+    unsigned int size,
+    bool wrapAround,
+    unsigned int extraSize,
+    const std::set<std::shared_ptr<Node> >& additionalDependencies,
+    unsigned int stride,
+    unsigned int length,
+    unsigned int count)
+{
+    const unsigned int fullSize = std::max(size, stride) * length * count;
+    unsigned int requiredSize = offset + fullSize;
+
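+    // Wrap-around example (illustrative): with offset=0, size=stride=128,
+    // length=4, count=1 and extraSize=64, requiredSize is first 512 + 64 = 576,
+    // then rounded up to the next multiple of stride: 5 * 128 = 640.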
+    if (wrapAround) {
+        requiredSize = fullSize + extraSize;
+
+        if (count > 1) {
+            // (requiredSize - offset) must be a multiple of (stride * length)
+            requiredSize = offset
+                + std::ceil((requiredSize - offset)
+                    / static_cast<double>(std::max(size, stride) * length))
+                        * (std::max(size, stride) * length);
+        }
+        else if (length > 1) {
+            // (requiredSize - offset) must be a multiple of stride
+            requiredSize = offset
+                + std::ceil((requiredSize - offset)
+                    / static_cast<double>(std::max(size, stride)))
+                        * std::max(size, stride);
+        }
+    }
+
+    if (requiredSize > memSpace->size || memSpace->released >= 0) {
+        // Expand in size and/or duration.
+        // If memSpace was already released, put it back on the stack
+        memSpace->released = -1;
+        expand(memSpace, requiredSize);
+    }
+
+    memSpace->dependencies.insert(additionalDependencies.begin(),
+                                  additionalDependencies.end());
+
+    return MemoryPlane(memSpace, mClock, offset, size, stride, length, count);
+}
+
+Aidge::MemoryManager::MemoryPlane Aidge::MemoryManager::reallocate(
+    const MemoryPlane& memPlane,
+    unsigned int extraOffset,
+    unsigned int size,
+    bool wrapAround,
+    unsigned int extraSize,
+    const std::set<std::shared_ptr<Node> >& additionalDependencies,
+    unsigned int stride,
+    unsigned int length,
+    unsigned int count)
+{
+    const unsigned int initialOffset = memPlane.getFinalOffset()
+        - memPlane.memSpace->offset + extraOffset;
+    const unsigned int fullSize = std::max(size, stride) * length * count;
+    unsigned int requiredSize = initialOffset + fullSize;
+
+    if (wrapAround) {
+        requiredSize = fullSize + extraSize;
+
+        if (count > 1) {
+            // (requiredSize - offset) must be a multiple of (stride * length)
+            requiredSize = initialOffset
+                + std::ceil((requiredSize - initialOffset)
+                    / static_cast<double>(std::max(size, stride) * length))
+                        * (std::max(size, stride) * length);
+        }
+        else if (length > 1) {
+            // (requiredSize - offset) must be a multiple of stride
+            requiredSize = initialOffset
+                + std::ceil((requiredSize - initialOffset)
+                    / static_cast<double>(std::max(size, stride)))
+                        * std::max(size, stride);
+        }
+
+        // Make sure that the intended margin with previous memPlane will be
+        // respected, as it may actually be lower because of the floor()
+        // in the memPlane getLimit() function.
+        if (memPlane.count > 1) {
+            requiredSize = memPlane.offset
+                + std::ceil((requiredSize - memPlane.offset)
+                    / static_cast<double>(memPlane.stride * memPlane.length))
+                        * (memPlane.stride * memPlane.length);
+        }
+        else if (memPlane.length > 1) {
+            requiredSize = memPlane.offset
+                + std::ceil((requiredSize - memPlane.offset)
+                    / static_cast<double>(memPlane.stride))
+                        * memPlane.stride;
+        }
+    }
+
+    if (requiredSize > memPlane.memSpace->size
+        || memPlane.memSpace->released >= 0)
+    {
+        // Expand in size and/or duration.
+        // If memSpace was already released, put it back on the stack
+        memPlane.memSpace->released = -1;
+        expand(memPlane.memSpace, requiredSize);
+    }
+
+    memPlane.memSpace->dependencies.insert(
+        additionalDependencies.begin(),
+        additionalDependencies.end());
+
+    const unsigned int finalOffset = memPlane.getFinalOffset()
+        - memPlane.memSpace->offset + extraOffset;
+
+    return MemoryPlane(memPlane.memSpace, mClock,
+                       finalOffset, size, stride, length, count);
+}
+
+unsigned int Aidge::MemoryManager::reallocate(
+    const MemoryPlane& memPlane,
+    const std::shared_ptr<Node>& node,
+    unsigned int extraOffset,
+    unsigned int size,
+    bool wrapAround,
+    unsigned int extraSize,
+    const std::set<std::shared_ptr<Node> >& additionalDependencies,
+    unsigned int stride,
+    unsigned int length,
+    unsigned int count)
+{
+    std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >::iterator it;
+    std::tie(it, std::ignore) = mMemPlanes.insert(std::make_pair(node,
+                                                std::vector<MemoryPlane>()));
+
+    (*it).second.push_back(reallocate(memPlane, extraOffset, size, wrapAround,
+                                      extraSize, additionalDependencies,
+                                      stride, length, count));
+    return ((*it).second.size() - 1);
+}
+
+unsigned int Aidge::MemoryManager::reallocate(
+    std::shared_ptr<MemorySpace> memSpace,
+    const std::shared_ptr<Node>& node,
+    unsigned int offset,
+    unsigned int size,
+    bool wrapAround,
+    unsigned int extraSize,
+    const std::set<std::shared_ptr<Node> >& additionalDependencies,
+    unsigned int stride,
+    unsigned int length,
+    unsigned int count)
+{
+    std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >::iterator it;
+    std::tie(it, std::ignore) = mMemPlanes.insert(std::make_pair(node,
+                                                std::vector<MemoryPlane>()));
+
+    (*it).second.push_back(reallocate(memSpace, offset, size, wrapAround,
+                                      extraSize, additionalDependencies,
+                                      stride, length, count));
+    return ((*it).second.size() - 1);
+}
+
+unsigned int Aidge::MemoryManager::release(std::shared_ptr<MemorySpace> memSpace)
+{
+    if (memSpace->released == -1) {
+        memSpace->released = mClock;
+
+        if (memSpace->dependencies.empty())
+            return offStack(memSpace->offset);
+    }
+
+    return 0;
+}
+
+unsigned int Aidge::MemoryManager::release(const std::shared_ptr<Node>& node)
+{
+    const std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::iterator it = mMemPlanes.find(node);
+
+    if (it == mMemPlanes.end()) {
+        fmt::print("Warning: release(): there is no allocated memory for node {}\n", node->name());
+        return 0;
+    }
+
+    unsigned int releasedMemSize = 0;
+
+    for (std::vector<MemoryPlane>::iterator itPlanes = (*it).second.begin(),
+        itPlanesEnd = (*it).second.end(); itPlanes != itPlanesEnd; ++itPlanes)
+    {
+        releasedMemSize += release((*itPlanes).memSpace);
+    }
+
+    // Remove dependencies
+    releasedMemSize += releaseDependencies(node);
+
+    return releasedMemSize;
+}
+
+unsigned int Aidge::MemoryManager::releaseDependencies(
+    const std::shared_ptr<Node>& node)
+{
+    unsigned int releasedMemSize = 0;
+
+    for (std::vector<std::shared_ptr<MemorySpace> >::iterator
+        it = mMemSpaces.begin(), itEnd = mMemSpaces.end(); it != itEnd;
+        ++it)
+    {
+        if (!(*it)->dependencies.empty()) {
+            (*it)->dependencies.erase(node);
+
+            if ((*it)->released <= mClock
+                && (*it)->dependencies.empty())
+            {
+                (*it)->released = mClock;
+                releasedMemSize += offStack((*it)->offset);
+            }
+        }
+    }
+
+    return releasedMemSize;
+}
+
+bool Aidge::MemoryManager::MaxLifetimeMinSizeFirst::operator()(
+    const std::shared_ptr<MemorySpace>& p0,
+    const std::shared_ptr<MemorySpace>& p1)
+{
+    const Clock_T lifetime0
+        = ((p0->released >= 0) ? p0->released : maxLifetime) - p0->allocated;
+    const Clock_T lifetime1
+        = ((p1->released >= 0) ? p1->released : maxLifetime) - p1->allocated;
+
+    return (lifetime0 > lifetime1
+            || (lifetime0 == lifetime1 && p0->size < p1->size));
+}
+
+bool Aidge::MemoryManager::MaxLifetimeMaxSizeFirst::operator()(
+    const std::shared_ptr<MemorySpace>& p0,
+    const std::shared_ptr<MemorySpace>& p1)
+{
+    const Clock_T lifetime0
+        = ((p0->released >= 0) ? p0->released : maxLifetime) - p0->allocated;
+    const Clock_T lifetime1
+        = ((p1->released >= 0) ? p1->released : maxLifetime) - p1->allocated;
+
+    return (lifetime0 > lifetime1
+            || (lifetime0 == lifetime1 && p0->size > p1->size));
+}
+
+bool Aidge::MemoryManager::MaxHoleMaxLifetimeFirst::operator()(
+    const std::shared_ptr<MemorySpace>& p0,
+    const std::shared_ptr<MemorySpace>& p1)
+{
+    const Clock_T lifetime0
+        = ((p0->released >= 0) ? p0->released : maxLifetime) - p0->allocated;
+    const Clock_T lifetime1
+        = ((p1->released >= 0) ? p1->released : maxLifetime) - p1->allocated;
+
+    const std::pair<Clock_T, unsigned int> maxHole0 = inst->getMaxHole(p0);
+    const std::pair<Clock_T, unsigned int> maxHole1 = inst->getMaxHole(p1);
+
+    return (maxHole0.second > maxHole1.second
+            || (maxHole0.second == maxHole1.second && lifetime0 > lifetime1));
+}
+
+void Aidge::MemoryManager::optimize(OptimizeStrategy strategy) {
+    if (strategy == None)
+        return;
+
+    const unsigned int maxLifetime = getMaxLifetime();
+
+    if (strategy == OptimizeMaxLifetimeMinSizeFirst) {
+        std::stable_sort(mMemSpaces.begin(), mMemSpaces.end(),
+                        MemoryManager::MaxLifetimeMinSizeFirst(maxLifetime));
+    }
+    else if (strategy == OptimizeMaxLifetimeMaxSizeFirst) {
+        std::stable_sort(mMemSpaces.begin(), mMemSpaces.end(),
+                        MemoryManager::MaxLifetimeMaxSizeFirst(maxLifetime));
+    }
+    else if (strategy == OptimizeMaxHoleMaxLifetimeFirst) {
+        std::stable_sort(mMemSpaces.begin(), mMemSpaces.end(),
+                        MemoryManager::MaxHoleMaxLifetimeFirst(maxLifetime, this));
+    }
+
+    std::vector<std::map<unsigned int, unsigned int> > stacks(maxLifetime + 1,
+                                        std::map<unsigned int, unsigned int>());
+
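+    // For each memory space, in the order given by the chosen strategy: merge
+    // the per-clock stacks over its lifetime, first-fit allocate it inside the
+    // merged stack, then propagate the new offset to every clock it spans.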
+    for (std::vector<std::shared_ptr<MemorySpace> >::const_iterator
+        it = mMemSpaces.begin(), itEnd = mMemSpaces.end(); it != itEnd; ++it)
+    {
+        const Clock_T maxT = ((*it)->released >= 0
+                                && (*it)->dependencies.empty())
+                                    ? (*it)->released : maxLifetime;
+
+        // Merge stacks over memSpace lifetime
+        std::map<unsigned int, unsigned int> mergedStacks;
+
+        for (Clock_T t = (*it)->allocated; t <= maxT; ++t) {
+            for (std::map<unsigned int, unsigned int>::iterator itMem
+                = stacks[t].begin(), itMemEnd = stacks[t].end();
+                itMem != itMemEnd; ++itMem)
+            {
+                bool newInsert;
+                std::map<unsigned int, unsigned int>::iterator itMergedMem;
+                std::tie(itMergedMem, newInsert) = mergedStacks.insert(
+                    std::make_pair((*itMem).first, (*itMem).second));
+
+                if (!newInsert) {
+                    (*itMergedMem).second = std::max((*itMergedMem).second,
+                                                     (*itMem).second);
+                }
+            }
+        }
+
+        std::map<unsigned int, unsigned int> mergedStack;
+
+        if (!mergedStacks.empty()) {
+            std::map<unsigned int, unsigned int>::iterator itMem
+                = mergedStacks.begin();
+
+            mergedStack.insert(*itMem);
+            ++itMem;
+
+            while (itMem != mergedStacks.end()) {
+                std::map<unsigned int, unsigned int>::reverse_iterator
+                    itMergedMem = mergedStack.rbegin();
+                const unsigned int nextOffset = (*itMergedMem).first
+                                                + (*itMergedMem).second;
+
+                if ((*itMem).first <= nextOffset) {
+                    (*itMergedMem).second
+                        = std::max((*itMem).first + (*itMem).second, nextOffset)
+                            - (*itMergedMem).first;
+                }
+                else
+                    mergedStack.insert(*itMem);
+
+                ++itMem;
+            }
+        }
+
+        // Allocate in merged stack
+        unsigned int offset = 0;
+        std::map<unsigned int, unsigned int>::iterator itMem
+            = mergedStack.begin();
+
+        while (true) {
+            if (itMem == mergedStack.end()
+                || (*itMem).first - offset >= (*it)->size)
+            {
+                mergedStack.insert(std::make_pair(offset, (*it)->size));
+                break;
+            }
+            else {
+                offset = (*itMem).first + (*itMem).second;
+                ++itMem;
+            }
+        }
+
+        (*it)->offset = offset;
+
+        for (Clock_T t = (*it)->allocated; t <= maxT; ++t) {
+            const std::map<unsigned int, unsigned int> stack
+                = getStack((*it), t);
+            stacks[t].insert(stack.begin(), stack.end());
+
+            //stacks[t].insert(std::make_pair(offset, (*it)->size));
+        }
+    }
+}
+
+unsigned int Aidge::MemoryManager::getOffset(const std::shared_ptr<Node>& node,
+                                            unsigned int plane) const
+{
+    const std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::const_iterator it = mMemPlanes.find(node);
+
+    if (it == mMemPlanes.end()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "getOffset(): no memory allocated for node name {}", node->name());
+    }
+
+    if (plane >= (*it).second.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "getOffset(): plane out of range for node name {}", node->name());
+    }
+
+    return ((*it).second[plane].memSpace->offset + (*it).second[plane].offset);
+}
+
+unsigned int Aidge::MemoryManager::getSize(const std::shared_ptr<Node>& node,
+                                          unsigned int plane) const
+{
+    const std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::const_iterator it = mMemPlanes.find(node);
+
+    if (it == mMemPlanes.end()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "getSize(): no memory allocated for node name {}", node->name());
+    }
+
+    if (plane >= (*it).second.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "getSize(): plane out of range for node name {}", node->name());
+    }
+
+    return (*it).second[plane].getSize();
+}
+
+unsigned int Aidge::MemoryManager::getSize(const std::shared_ptr<Node>& node)
+    const
+{
+    const std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::const_iterator it = mMemPlanes.find(node);
+
+    if (it == mMemPlanes.end()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "getSize(): no memory allocated for node name {}", node->name());
+    }
+
+    unsigned int size = 0;
+
+    for (std::vector<MemoryPlane>::const_iterator itPlanes
+        = (*it).second.begin(), itPlanesEnd = (*it).second.end();
+        itPlanes != itPlanesEnd; ++itPlanes)
+    {
+        size += (*itPlanes).getSize();
+    }
+
+    return size;
+}
+
+unsigned int Aidge::MemoryManager::getNbPlanes(const std::shared_ptr<Node>& node)
+    const
+{
+    const std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::const_iterator it = mMemPlanes.find(node);
+    return (it == mMemPlanes.end()) ? 0 : (*it).second.size();
+}
+
+unsigned int Aidge::MemoryManager::getPeakUsage() const {
+    unsigned int peakUsage = 0;
+
+    for (std::vector<std::shared_ptr<MemorySpace> >::const_iterator
+        it = mMemSpaces.begin(), itEnd = mMemSpaces.end(); it != itEnd; ++it)
+    {
+        peakUsage = std::max(peakUsage,
+                             (*it)->offset + (*it)->size);
+    }
+
+    return peakUsage;
+}
+
+Aidge::MemoryManager::Clock_T Aidge::MemoryManager::getMaxLifetime() const {
+    Clock_T maxLifetime = 0;
+
+    for (std::vector<std::shared_ptr<MemorySpace> >::const_iterator
+        it = mMemSpaces.begin(), itEnd = mMemSpaces.end(); it != itEnd; ++it)
+    {
+        maxLifetime = std::max(maxLifetime,
+            std::max((*it)->allocated, (*it)->released));
+    }
+
+    return maxLifetime;
+}
+
+const std::vector<Aidge::MemoryManager::MemoryPlane>&
+Aidge::MemoryManager::getPlanes(const std::shared_ptr<Node>& node) const
+{
+    const std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::const_iterator it = mMemPlanes.find(node);
+
+    if (it == mMemPlanes.end()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "getSize(): no memory allocated for node name {}", node->name());
+    }
+
+    return (*it).second;
+}
+
+Aidge::MemoryManager::MemMap_T
+Aidge::MemoryManager::getPlanes(std::shared_ptr<MemorySpace> memSpace)
+    const
+{
+    MemMap_T planes;
+
+    for (MemMap_T::const_iterator itNode = mMemPlanes.begin(),
+        itNodeEnd = mMemPlanes.end(); itNode != itNodeEnd; ++itNode)
+    {
+        for (std::vector<MemoryPlane>::const_iterator itPlane
+             = (*itNode).second.begin(), itPlaneEnd = (*itNode).second.end();
+             itPlane != itPlaneEnd; ++itPlane)
+        {
+            if ((*itPlane).memSpace == memSpace) {
+                std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+                    ::iterator it;
+                std::tie(it, std::ignore) = planes.insert(
+                    std::make_pair((*itNode).first,
+                                   std::vector<MemoryPlane>()));
+
+                (*it).second.push_back((*itPlane));
+            }
+        }
+    }
+
+    return planes;
+}
+
+unsigned int Aidge::MemoryManager::getNbPlanes(
+    std::shared_ptr<MemorySpace> memSpace) const
+{
+    unsigned int count = 0;
+
+    for (std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::const_iterator itNode = mMemPlanes.begin(),
+        itNodeEnd = mMemPlanes.end(); itNode != itNodeEnd; ++itNode)
+    {
+        for (std::vector<MemoryPlane>::const_iterator itPlane
+             = (*itNode).second.begin(), itPlaneEnd = (*itNode).second.end();
+             itPlane != itPlaneEnd; ++itPlane)
+        {
+            if ((*itPlane).memSpace == memSpace)
+                ++count;
+        }
+    }
+
+    return count;
+}
+
+void Aidge::MemoryManager::tick()
+{
+    ++mClock;
+}
+
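+// Dumps the memory layout to fileName and emits a companion gnuplot script
+// "<fileName>_plot.gnu", which can be rendered (e.g. with
+// `gnuplot <fileName>_plot.gnu`) into "<fileName>_plot.png".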
+void Aidge::MemoryManager::log(const std::string& fileName) const
+{
+    auto memData = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(fileName.c_str(), "w"), &std::fclose);
+
+    if (!memData) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Could not create memory layout log file: {}", fileName);
+    }
+
+    auto gnuplot = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((fileName + "_plot.gnu").c_str(), "w"), &std::fclose);
+
+    if (!gnuplot) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Could not create memory layout log file: {}", (fileName + "_plot.gnu"));
+    }
+
+    const Clock_T maxLifetime = getMaxLifetime();
+    const unsigned int peakUsage = getPeakUsage();
+
+    fmt::print(gnuplot.get(), "#!/usr/bin/gnuplot\n");
+    fmt::print(gnuplot.get(), "set term pngcairo size 1280,768 noenhanced\n");
+    fmt::print(gnuplot.get(), "set output \"{}\"\n", fileName + "_plot.png");
+    fmt::print(gnuplot.get(), "set xrange [{}:{}]\n", 0, maxLifetime + 1);
+    fmt::print(gnuplot.get(), "set yrange [{}:{}]\n", 0, 1.05 * (peakUsage / 1024.0));
+    fmt::print(gnuplot.get(), "set xlabel \"Time\"\n");
+    fmt::print(gnuplot.get(), "set ylabel \"Memory usage (KWords)\"\n");
+    fmt::print(gnuplot.get(), "set grid\n");
+    fmt::print(gnuplot.get(), "set xtics 1\n");
+    fmt::print(gnuplot.get(), "unset key\n");
+    fmt::print(gnuplot.get(), "set palette rgbformulae 30,31,32\n");
+    fmt::print(gnuplot.get(), "unset colorbox\n");
+    fmt::print(gnuplot.get(), "N={}\n", mMemPlanes.size() + 1);
+
+    unsigned int objectId = 1;
+    unsigned int labelId = 1;
+
+    for (std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::const_iterator it = mMemPlanes.begin(), itEnd = mMemPlanes.end();
+        it != itEnd; ++it)
+    {
+        const std::string name = (*it).first->name();
+        fmt::print(memData.get(), "{}\n", name);
+
+        double minX = -1;
+        unsigned int maxY = 0;
+
+        for (std::vector<MemoryPlane>::const_iterator itPlanes
+             = (*it).second.begin(), itPlanesBegin = (*it).second.begin(),
+            itPlanesEnd = (*it).second.end(); itPlanes != itPlanesEnd;
+            ++itPlanes)
+        {
+            const unsigned int contiguousOffset
+                = (*itPlanes).getContiguousOffset();
+            const unsigned int contiguousSize = (*itPlanes).getContiguousSize();
+            const unsigned int wrappedOffset = (*itPlanes).getWrappedOffset();
+            const unsigned int wrappedSize = (*itPlanes).getWrappedSize();
+
+            const Clock_T allocated = (*itPlanes).allocated;
+            const Clock_T released = (*itPlanes).memSpace->released;
+            const bool isReleased = (released >= 0
+                                && (*itPlanes).memSpace->dependencies.empty());
+
+            fmt::print(memData.get(), "  {} {} ({:#08x}U) -> {} ({:#08x}U)",
+                (itPlanes - itPlanesBegin), contiguousOffset, contiguousOffset,
+                (contiguousOffset + contiguousSize), (contiguousOffset + contiguousSize));
+
+            if (wrappedSize > 0) {
+                fmt::print(memData.get(), " + {} ({:#08x}U) -> {} ({:#08x}U)",
+                    wrappedOffset, wrappedOffset,
+                    (wrappedOffset + wrappedSize), (wrappedOffset + wrappedSize));
+            }
+
+            fmt::print(memData.get(), " [{}] @ {}", (*itPlanes).getSize(), allocated);
+
+            if (isReleased) {
+                fmt::print(memData.get(), " to {}", released);
+            }
+
+            fmt::print(memData.get(), "\n");
+
+            // Gnuplot
+            const double startX = allocated;
+
+            if (startX < minX || minX < 0) {
+                minX = startX;
+                maxY = contiguousOffset + contiguousSize;
+            }
+
+            if ((*itPlanes).size != (*itPlanes).stride) {
+                for (unsigned int offset = contiguousOffset;
+                    offset < contiguousOffset + contiguousSize;
+                    offset += (*itPlanes).stride)
+                {
+                    fmt::print(gnuplot.get(), "set object {} rectangle from {},{} to {},{} fc palette frac ({} * 1./N)\n",
+                        (allocated * 100 + objectId), startX, (offset / 1024.0),
+                        (((isReleased) ? released : maxLifetime) + 1),
+                        (std::min((offset + (*itPlanes).size),
+                                        contiguousOffset + contiguousSize) / 1024.0),
+                        labelId);
+                    ++objectId;
+                }
+            }
+            else {
+                fmt::print(gnuplot.get(), "set object {} rectangle from {},{} to {},{} fc palette frac ({} * 1./N)\n",
+                    (allocated * 100 + objectId), startX, (contiguousOffset / 1024.0),
+                    (((isReleased) ? released : maxLifetime) + 1),
+                    ((contiguousOffset + contiguousSize) / 1024.0),
+                    labelId);
+                ++objectId;
+            }
+
+            if (wrappedSize > 0) {
+                fmt::print(gnuplot.get(), "set object {} rectangle from {},{} to {},{} fc palette frac ({} * 1./N)\n",
+                    (allocated * 100 + objectId), startX, (wrappedOffset / 1024.0),
+                    (((isReleased) ? released : maxLifetime) + 1),
+                    ((wrappedOffset + wrappedSize) / 1024.0),
+                    labelId);
+                ++objectId;
+
+                fmt::print(gnuplot.get(), "set arrow from {},{} to {},{} nohead\n",
+                    startX, (contiguousOffset / 1024.0),
+                    (startX + 0.1), (contiguousOffset / 1024.0));
+
+                fmt::print(gnuplot.get(), "set arrow from {},{} to {},{} nohead\n",
+                    (startX + 0.05), ((contiguousOffset + contiguousSize) / 1024.0),
+                    (startX + 0.05), (wrappedOffset / 1024.0));
+            }
+        }
+
+        fmt::print(gnuplot.get(), "set label {} '{}' at {},{} rotate by 30 font \",8\" offset char 0.5,0.5\n",
+            labelId, name, minX, (maxY / 1024.0));
+        ++labelId;
+
+        fmt::print(memData.get(), "\n");
+    }
+
+    fmt::print(gnuplot.get(), "set arrow from 0,{} to {},{} nohead lc rgb \"red\"\n",
+        (peakUsage / 1024.0), (maxLifetime + 1),
+        (peakUsage / 1024.0));
+
+    fmt::print(gnuplot.get(), "set label {} 'Peak usage = {} KWords' at 0,{} textcolor rgb \"red\" offset char 0.5,0.5\n",
+        labelId, (peakUsage / 1024.0), (peakUsage / 1024.0));
+
+    fmt::print(gnuplot.get(), "plot 0\n");
+}
+
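+// First-fit placement: scan the offset-ordered map of live blocks and insert
+// the new block in the first hole at least `size` wide (or on top of the stack).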
+unsigned int Aidge::MemoryManager::onStack(unsigned int size)
+{
+    unsigned int offset = 0;
+    std::map<unsigned int, unsigned int>::iterator itMem = mMemStack.begin();
+
+    while (true) {
+        if (itMem == mMemStack.end()
+            || (*itMem).first - offset >= size)
+        {
+            mMemStack.insert(std::make_pair(offset, size));
+            break;
+        }
+        else {
+            offset = (*itMem).first + (*itMem).second;
+            ++itMem;
+        }
+    }
+
+    return offset;
+}
+
+unsigned int Aidge::MemoryManager::offStack(unsigned int offset)
+{
+    std::map<unsigned int, unsigned int>::iterator itMem
+        = mMemStack.find(offset);
+
+    if (itMem == mMemStack.end()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "offStack(): offset not found in stack");
+    }
+    else {
+        const unsigned int size = (*itMem).second;
+        mMemStack.erase(offset);
+        return size;
+    }
+}
+
+std::map<unsigned int, unsigned int> Aidge::MemoryManager::getStack(
+    std::shared_ptr<MemorySpace> memSpace,
+    Clock_T clock) const
+{
+    // Find all planes associated to memSpace and index them by their allocated
+    // value in a map
+    std::map<Clock_T, std::vector<MemoryPlane> > planes;
+
+    for (std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::const_iterator itNode = mMemPlanes.begin(),
+        itNodeEnd = mMemPlanes.end(); itNode != itNodeEnd; ++itNode)
+    {
+        for (std::vector<MemoryPlane>::const_iterator itPlane
+             = (*itNode).second.begin(), itPlaneEnd = (*itNode).second.end();
+             itPlane != itPlaneEnd; ++itPlane)
+        {
+            if ((*itPlane).memSpace == memSpace) {
+                std::map<Clock_T, std::vector<MemoryPlane> >::iterator it;
+                std::tie(it, std::ignore) = planes.insert(
+                    std::make_pair((*itPlane).allocated,
+                                   std::vector<MemoryPlane>()));
+
+                (*it).second.push_back((*itPlane));
+            }
+        }
+    }
+
+    // Find the planes allocated at time clock, or at the closest earlier clock
+    // => obtain all the planes that are considered valid at time clock
+    Clock_T c = clock;
+    std::map<Clock_T, std::vector<MemoryPlane> >::iterator itPlanes;
+
+    do
+        itPlanes = planes.find(c);
+    while (itPlanes == planes.end() && (c--) > 0);
+
+    assert(itPlanes != planes.end());
+
+    // Fill the stack at time clock
+    std::map<unsigned int, unsigned int> stack;
+
+    for (std::vector<MemoryPlane>::const_iterator
+        it = (*itPlanes).second.begin(), itEnd = (*itPlanes).second.end();
+        it != itEnd; ++it)
+    {
+        stack.insert(std::make_pair((*it).getContiguousOffset(),
+                                    (*it).getContiguousSize()));
+
+        if ((*it).getWrappedSize() > 0) {
+            stack.insert(std::make_pair((*it).getWrappedOffset(),
+                                        (*it).getWrappedSize()));
+        }
+    }
+
+    return stack;
+}
+
+std::pair<Aidge::MemoryManager::Clock_T, unsigned int>
+Aidge::MemoryManager::getMaxHole(std::shared_ptr<MemorySpace> memSpace) const
+{
+    std::map<Clock_T, unsigned int> holesSize;
+
+    for (std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
+        ::const_iterator itNode = mMemPlanes.begin(),
+        itNodeEnd = mMemPlanes.end(); itNode != itNodeEnd; ++itNode)
+    {
+        for (std::vector<MemoryPlane>::const_iterator itPlane
+             = (*itNode).second.begin(), itPlaneEnd = (*itNode).second.end();
+             itPlane != itPlaneEnd; ++itPlane)
+        {
+            if ((*itPlane).memSpace == memSpace) {
+                const unsigned int holeSize = memSpace->size
+                    - (*itPlane).getContiguousSize()
+                    - (*itPlane).getWrappedSize();
+
+                std::map<Clock_T, unsigned int>::iterator it;
+                bool newInsert;
+                std::tie(it, newInsert) = holesSize.insert(
+                    std::make_pair((*itPlane).allocated, holeSize));
+
+                if (!newInsert) {
+                    // Another plane exists at the same time; one must subtract
+                    // the size of this other plane from the hole size
+                    (*it).second = std::max(0, static_cast<int>((*it).second)
+                        - static_cast<int>((*itPlane).getContiguousSize())
+                        - static_cast<int>((*itPlane).getWrappedSize()));
+                }
+            }
+        }
+    }
+
+    return *std::max_element(holesSize.begin(),
+                             holesSize.end(),
+                             [](const auto& left, const auto& right) {
+                                // Compare holes by size (strict less-than)
+                                return left.second < right.second;
+                             });
+}
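+
+// Usage sketch (illustrative; node variables are hypothetical, and the default
+// stride/length/count arguments declared in MemoryManager.hpp are assumed):
+//   MemoryManager mm;
+//   mm.allocate(convNode, /*size=*/4096, {reluNode}); // alive until reluNode is done
+//   mm.tick();                        // advance the logical clock
+//   mm.release(convNode);             // held back while reluNode is a dependency
+//   mm.releaseDependencies(reluNode); // now the memory can actually be freed
+//   mm.log("memory_layout");          // dump the layout and a gnuplot script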
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 380ff8bf3ebabc1a7f7bf7c6f53d05fe99ab30dd..6c827f236167c8bce4fd5a39c392f00ac8fe6649 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -16,10 +16,16 @@
 #include <set>
 #include <string>
 
+#include <fmt/ranges.h>
+#include <fmt/color.h>
+
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/operator/MetaOperator.hpp"
 
 void drawProgressBar(double progress, int barWidth, const std::string& additionalInfo = "") {
     putchar('[');
@@ -30,7 +36,7 @@ void drawProgressBar(double progress, int barWidth, const std::string& additiona
         else
             putchar(' ');
     }
-    printf("] %d%% | %s\r", static_cast<int>(progress * 100), additionalInfo.c_str());
+    fmt::print("] {}% | {}\r", static_cast<int>(progress * 100), additionalInfo);
     fflush(stdout);
 }
 
@@ -40,63 +46,124 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
     // TODO: handle memory allocation in scheduler
     // TODO: optimize memory usage
 
-    // setup initial producers list
+    // 1) Setup initial consumers list:
+    // It is the list of input nodes
+    std::set<std::shared_ptr<Node>> consumers = mGraphView->inputNodes();
+    // Plus the list of nodes inside the graph connected to an inner producer
     std::set<std::shared_ptr<Node>> producers;
     for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) {
-        if (nodePtr->type() == "Producer") {
+        if (nodePtr->type() == Producer_Op::Type) {
             producers.insert(nodePtr);
         }
     }
-    // add Data Input
-    // FIXME : should be changed when the real system for providing
-    // data is implemented
-    for (const std::shared_ptr<Node>& nodePtr : mGraphView->inputNodes()) {
-        for (const auto& parentPtr : nodePtr->getParents()) {
-            if ((mGraphView->getNodes()).find(parentPtr) == (mGraphView->getNodes()).end()) {
-                // Node not found in the graph, it's an outside producer
-                producers.insert(parentPtr);
+    const auto producersConsumers = getConsumers(producers);
+    consumers.insert(producersConsumers.begin(), producersConsumers.end());
+
+    std::map<std::shared_ptr<Node>, std::string> namePtrTable;
+    if (verbose) namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})");
+
+    // "Still" consumers are consumers that were already run but can still
+    // consume data. They must be run AFTER the remaining consumers to ensure
+    // a non-greedy producers-consumers model!
+    std::set<std::shared_ptr<Node>> stillConsumers;
+
+    mStaticSchedule.push_back(std::vector<std::shared_ptr<Node>>());
+
+    do {
+        // 2) From the current consumers list, check if any prior consumer node
+        // is needed. A prior will generally be required for any node consuming 
+        // parameters (weights and bias) that is not an input node.
+        // If for a given node, only parent producers (at any depth) are needed
+        // to satisfy its required data, it becomes a prior.
+        // If the prior node is a producer, it is added to the list of required
+        // producers.
+        // If the prior node is of another type, it replaces the initial consumer
+        // in the new priorConsumers list. The initial consumer will become
+        // a consumer again later, by construction.
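+        // Example (illustrative): a Conv node consuming weight and bias
+        // Producers typically yields those Producers as required producers,
+        // while the Conv itself is scheduled once their data is available.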
+        if (verbose) fmt::print("List of consumers with their priors:\n");
+        std::set<std::shared_ptr<Node>> requiredProducers;
+        std::set<std::shared_ptr<Node>> priorConsumers;
+
+        for (const auto& consumer : consumers) {
+            if (verbose) {
+                fmt::print("\t- consumer: ");
+                fmt::print(fg(fmt::color::orange), namePtrTable[consumer]);
+                fmt::print("\n");
+            }
+
+            const auto& prior = getPriorProducersConsumers(consumer);
+
+            if (prior.isPrior) {
+                if (verbose) {
+                    std::vector<std::string> requiredProducersName;
+                    std::transform(prior.requiredProducers.begin(), prior.requiredProducers.end(),
+                        std::back_inserter(requiredProducersName),
+                        [&namePtrTable](auto val){ return namePtrTable[val]; });
+                    fmt::print("\t\trequired producers: {}\n", requiredProducersName);
+
+                    std::vector<std::string> priorConsumersName;
+                    std::transform(prior.priorConsumers.begin(), prior.priorConsumers.end(),
+                        std::back_inserter(priorConsumersName),
+                        [&namePtrTable](auto val){ return namePtrTable[val]; });
+                    fmt::print("\t\tprior consumers: {}\n", priorConsumersName);
+                }
+
+                requiredProducers.insert(prior.requiredProducers.cbegin(), prior.requiredProducers.cend());
+                priorConsumers.insert(prior.priorConsumers.cbegin(), prior.priorConsumers.cend());
+            }
+            else {
+                priorConsumers.insert(consumer);
             }
         }
-    }
 
-    // setup consumer list
-    // std::set<std::shared_ptr<Node>> consumers = getConsumers(producers);
+        // 3) Prior consumers replace the initial consumers list.
+        // By construction, initial consumers will necessarily become consumers
+        // again later.
+        consumers.swap(priorConsumers);
 
-    /* It may not be necessary to initialize producer */
-    std::set<std::shared_ptr<Node>> consumers = mGraphView->inputNodes();
-    do {
-        // find runnable consumers
+        // 4) Make producers generate the required data.
+        // Producers are special nodes that generate data on demand.
+        for (const auto& requiredProducer : requiredProducers) {
+            requiredProducer->getOperator()->updateConsummerProducer();
+            mStaticSchedule.back().push_back(requiredProducer);
+        }
+
+        // 5) Find runnable consumers.
+        // A consumer is runnable if the required data is available for all of 
+        // its inputs. At this point, not all consumers are necessarily
+        // runnable because some may depend on the execution of others (when
+        // there are multiple successive priors, for example).
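+        // Illustrative check: with C=0 already consumed, R=1 required and
+        // P=1 available on a given input, C + R <= P holds, so this input
+        // does not prevent the node from running.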
         std::set<std::shared_ptr<Node>> runnableConsumers;
-        if (verbose) printf("List of layers receiving data:\n");
+        if (verbose) fmt::print("Updated list of consumers:\n");
         for (const auto& consumer : consumers) {
             if (verbose) {
-                printf("\t- consumer: "
-                       "\x1b[1;37m"
-                       "%s"
-                       "\x1b[0m"
-                       "\n\t\tR/C:\t",
-                       (consumer->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(consumer.get()))).c_str());
+                fmt::print("\t- consumer: ");
+                fmt::print(fg(fmt::color::orange), namePtrTable[consumer]);
+                fmt::print("\n\t\tC/R:\t");
                 for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) {
-                    printf("%zu/%zu\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
+                    fmt::print("{}/{}\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
-                printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                fmt::print("{}/{}", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
-                printf("\n\t\tP:\t");
+                fmt::print("\n\t\tP:\t");
                 for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
-                    printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                    fmt::print("{}\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
-                printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
-                printf("\n");
+                fmt::print("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                fmt::print("\n");
             }
+            
             bool isRunnable = true;
+            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
+                if (/*consumer->getOperator()->getNbRequiredData(inputIdx) > 0
+                    && */(consumer->getOperator()->getNbConsumedData(inputIdx) + consumer->getOperator()->getNbRequiredData(inputIdx)) >
+                            getNbAvailableData(consumer, inputIdx)) {
+                    if (verbose) fmt::print("  not runnable: C{} + R{} > P{} for input #{}\n",
+                        consumer->getOperator()->getNbConsumedData(inputIdx),
+                        consumer->getOperator()->getNbRequiredData(inputIdx),
+                        getNbAvailableData(consumer, inputIdx), inputIdx);
 
-            IOIndex_t parentID = 0;  // FIXME: handle this correctly
-            // Check every input has got enought data to run
-            for (const auto& consumerParent : consumer->dataInputs()) {
-                if (consumerParent.first &&
-                    consumer->getOperator()->getNbRequiredData(parentID++) >
-                            consumerParent.first->getOperator()->getNbProducedData(consumerParent.second)) {
                     // not enough data to run
                     isRunnable = false;
                     break;
@@ -108,70 +175,249 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
             }
         }
 
-        // Push consumers in the list of nodes to run and update the consumer producer system
+        // 5) If no consumer is runnable, it is a stop condition!
+        if (runnableConsumers.empty()) {
+            if (verbose) fmt::print("********************\n");
+            // No consumer is runnable: some required data is missing for all of
+            // them. There are two possibilities:
+            // - At least one required data source is exhausted, which may be
+            //   an expected stop condition.
+            // - There is a deadlock between consumers, where each one is
+            //   waiting for data from another.
+            break;
+        }
+
+        // 6) Push the runnable consumers into the list of nodes to run and
+        // update the consumer-producer system.
+        // At this point, simultaneously runnable consumers have no data
+        // dependency between them and could be run in parallel!
         for (const auto& runnable : runnableConsumers) {
-            if (verbose) printf("Runnable: %s\n", (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
+            if (verbose) fmt::print("Runnable: {}\n", namePtrTable[runnable]);
             runnable->getOperator()->updateConsummerProducer();
-            mStaticSchedule.push_back(runnable);
+            mStaticSchedule.back().push_back(runnable);
         }
 
-        // update producers and consumers list
-        if (verbose) printf("Updating producer and consumer lists...\n");
-        const auto oldConsumers = consumers;
-
-        for (const auto& consumer : oldConsumers) {
+        // 7) Update consumers list
+        if (verbose) fmt::print("Updating producer and consumer lists...\n");
+        for (const auto& consumer : runnableConsumers) {
             if (verbose) {
-                printf("\t- consumer: %s\n\t\tR/C:\t",
-                       (consumer->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(consumer.get()))).c_str());
+                fmt::print("\t- consumer: {}\n\t\tC/R:\t",
+                       namePtrTable[consumer]);
                 for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) {
-                    printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
+                    fmt::print("{}/{}\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
-                printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                fmt::print("{}/{}", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
-                printf("\n\t\tP:\t");
+                fmt::print("\n\t\tP:\t");
                 for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
-                    printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                    fmt::print("{}\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
-                printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
-                printf("\n");
+                fmt::print("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                fmt::print("\n");
             }
+
+            // 7.1) If the current consumer still has data to consume, it will
+            // be put back in the consumers list once the remaining consumers
+            // have been exhausted.
             bool isStillConsumer = false;
+            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
+                if (consumer->getOperator()->getNbConsumedData(inputIdx) <
+                            getNbAvailableData(consumer, inputIdx)) {
+                    if (verbose) fmt::print("  still consumer: C{} < P{} for input #{}\n",
+                        consumer->getOperator()->getNbConsumedData(inputIdx),
+                        getNbAvailableData(consumer, inputIdx), inputIdx);
 
-            IOIndex_t parentID = 0;  // FIXME: handle this correctly
-            // should we check input or dataInput ?
-            for (const auto& consumerParent : consumer->inputs()) {
-                if (consumerParent.first &&
-                    consumer->getOperator()->getNbConsumedData(parentID++) <
-                            consumerParent.first->getOperator()->getNbProducedData(consumerParent.second)) {
                     // there is still data to consume
                     isStillConsumer = true;
                     break;
                 }
             }
 
+            // 7.2) If the current consumer becomes a producer for other nodes,
+            // its children become consumers.
+            bool isProducer = false;
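+            // A consumer is also a producer if, for any of its outputs, it has
+            // produced more data than at least one of its children has consumed.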
             for (IOIndex_t outId = 0; outId < consumer->nbOutputs(); ++outId) {
+                for (const auto& child : consumer->getChildren(outId)) {
+                    if (child) {
+                        IOIndex_t inputIdx = 0;
+                        for (const auto& childParent : child->getParents()) {
+                            if (childParent == consumer) {
+                                if (consumer->getOperator()->getNbProducedData(outId) > child->getOperator()->getNbConsumedData(inputIdx)) {
+                                    isProducer = true;
+                                }
+                            }
+                            ++inputIdx;
+                        }
+                    }
+                }
-                if (consumer->getOperator()->getNbProducedData(outId) > 0) {
-                    if (verbose) printf("  also producer\n");
-                    // make sure consumer is also a producer
-                    producers.insert(consumer);
-
-                    const auto& childs = consumer->getChildren();
-                    consumers.insert(childs.begin(), childs.end());
-                    break;
-                }
             }
 
-            if (!isStillConsumer) {
-                if (verbose) printf("  no more consumer\n");
-                // consumer is no longer a consumer, only a producer
-                consumers.erase(consumer);
+            consumers.erase(consumer);
+
+            if (isProducer) {
+                if (verbose) fmt::print("  also producer\n");
+                // make sure consumer is also a producer
+                producers.insert(consumer);
+
+                const auto& newConsumers = getConsumers({consumer});
+                consumers.insert(newConsumers.cbegin(), newConsumers.cend());
             }
+
+            if (isStillConsumer) {
+                // If there is still data to consume, the consumer will be
+                // run AFTER the other remaining consumers
+                // (= non-greedy consumers)
+                stillConsumers.insert(consumer);
+            }
+        }
+
+        // 8) If there are no more consumers, swap with the possible "still
+        // consumers". This ensures the "non-greedy" consumer behavior.
+        if (consumers.empty()) {
+            consumers.swap(stillConsumers);
+            stillConsumers.clear();
         }
 
-        if (verbose) printf("*************\n");
+        if (verbose) fmt::print("********************\n");
     } while (!consumers.empty());
 
+    if (verbose) {
+        if (!consumers.empty()) {
+            fmt::print("/!\\ Remaining consumers: possible dead-lock\n");
+            fmt::print("********************\n");
+        }
+    }
+}
+
+void Aidge::SequentialScheduler::resetScheduling() {
+    for (auto node : mGraphView->getNodes()) {
+        node->getOperator()->resetConsummerProducer();
+    }
+
+    mStaticSchedule.clear();
+    mStaticScheduleStep = 0;
+    mScheduling.clear();
+}
+
+/**
+ * This is a simplified version with no special handling of concatenation.
+ */
+Aidge::MemoryManager Aidge::SequentialScheduler::generateMemory(bool incProducers, bool wrapAroundBuffer) const {
+    MemoryManager memManager;
+
+    for (const auto& schedule : mStaticSchedule) {
+        for (const auto& node : schedule) {
+            if (!incProducers && node->type() == Producer_Op::Type) {
+                memManager.releaseDependencies(node);
+                continue;
+            }
+            
+            const auto childs = node->getChildren();
+            AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+            const auto op = std::static_pointer_cast<OperatorTensor>(node->getOperator());
+
+            std::vector<const MemoryManager::MemoryPlane*> wrapAroundMemPlane;
+
+            // Allocate a memory plane for each node's output
+            for (IOIndex_t outputIdx = 0; outputIdx < node->nbOutputs(); ++outputIdx) {
+                const size_t requiredSize = op->getRequiredMemory(outputIdx, {});
+
+                // By default, specifies a fully monolithic memory block
+                size_t size = requiredSize;
+                size_t stride = 0;
+                size_t length = 1;
+                size_t count = 1;
+
+                if (op->getOutput(outputIdx) && op->getOutput(outputIdx)->dims().size() > 3) {
+                    // If possible, assume an NCHW layout
+                    size = op->getOutput(outputIdx)->dims().end()[-3];
+                    stride = size;
+                    length = op->getOutput(outputIdx)->dims().end()[-1];
+                    count = op->getOutput(outputIdx)->dims().end()[-2];
+                }
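+                // e.g. (illustration only) for dims {N, C, H, W} = {1, 3, 224, 224}
+                // above: size = stride = 3 (C), count = 224 (H), length = 224 (W).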
+                
+                // Check if a wrap-around buffer is possible for this node
+                // (re-using a previous node's output memory for this node's outputs).
+                // => only possible if this node is the only child of its parent(s)
+                size_t wrapAroundSize = 0;
+                size_t wrapAroundExtra = 0;
+                wrapAroundMemPlane.push_back(nullptr);
+
+                // Select the best parent among all allocable nodes for
+                // reallocation, which is the one with the most memory (in
+                // order to minimize the reallocation size).
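+                // Hypothetical example, assuming no protected data on the input:
+                // if the best parent plane holds 2048 bytes and this output
+                // requires 2304 bytes, then wrapAroundSize = 2048 and
+                // wrapAroundExtra = 256 extra bytes past the reused plane.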
+                IOIndex_t inputIdx = 0;
+                for (const auto& parent : node->dataInputs()) {
+                    if (parent.first && parent.first->getChildren(parent.second).size() == 1
+                        // there might be no existing plane if the parent was
+                        // not yet scheduled (because it may be a recurrent connection)
+                        && memManager.getNbPlanes(parent.first) >= parent.first->nbOutputs()
+                        // memSpace should not be already released
+                        && memManager.getPlanes(parent.first).end()[-parent.first->nbOutputs()+parent.second].memSpace->released == -1)
+                    {
+                        const bool isWrappable = (op->getNbRequiredProtected(inputIdx) < op->getNbRequiredData(inputIdx));
+                        const MemoryManager::MemoryPlane& memPlane = memManager.getPlanes(parent.first).end()[-parent.first->nbOutputs()+parent.second];
+
+                        if (isWrappable || !memManager.isWrapAround(
+                                    memPlane.memSpace,
+                                    memPlane.getFinalOffset()
+                                        - memPlane.memSpace->offset,
+                                    requiredSize))
+                        {
+                            if (memPlane.getSize() > wrapAroundSize + op->getNbRequiredProtected(inputIdx)
+                                && std::find(wrapAroundMemPlane.begin(), wrapAroundMemPlane.end(), &memPlane) == wrapAroundMemPlane.end())
+                            {
+                                wrapAroundSize = memPlane.getSize() - op->getNbRequiredProtected(inputIdx);
+                                if (requiredSize > wrapAroundSize) {
+                                    wrapAroundExtra = requiredSize - wrapAroundSize;
+                                }
+                                wrapAroundMemPlane[outputIdx] = &memPlane;
+                            }
+
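+                            // Perfect fit: the reused plane is large enough for
+                            // this output, no need to look for a better parent.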
+                            if (wrapAroundExtra == 0) {
+                                break;
+                            }
+                        }
+                    }
+                    ++inputIdx;
+                }
+
+                // MemoryPlane to (re)use
+                const MemoryManager::MemoryPlane& memPlane
+                    = (wrapAroundBuffer && wrapAroundSize > 0)
+                        ? (*wrapAroundMemPlane[outputIdx]) :
+                            memManager.allocate(requiredSize, childs, stride, length, count);
+
+                if (wrapAroundBuffer && wrapAroundSize > 0) {
+                    memManager.reallocate(memPlane,
+                        node, 0,
+                        requiredSize, true, wrapAroundExtra, childs, stride, length, count);
+                }
+                else {
+                    memManager.reallocate(memPlane.memSpace,
+                        node, memPlane.offset,
+                        requiredSize, false, 0, childs, stride, length, count);
+                }
+            }
+
+            memManager.releaseDependencies(node);
+            memManager.tick();
+        }
+    }
+
+    return memManager;
 }
 
 void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data){
@@ -188,7 +434,6 @@ void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge
 }
 
 
-// TODO: handle multiple inputs/outputs
 void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
     
     // Collect all data input of the graph (that are producers)
@@ -203,21 +448,19 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::ve
     // If scheduling was already generated (in one or several steps, i.e. one or
     // several successive call to generateScheduling()), do not generate it twice
     if (mStaticSchedule.empty()) {
-        this->generateScheduling();
+        this->generateScheduling(verbose);
     }
 
-    // Clear previous scheduling results
-    mScheduling.clear();
+    std::map<std::shared_ptr<Node>, std::string> namePtrTable;
+    if (verbose) namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})");
 
-    int cpt = 0;
-    for (const auto& runnable : mStaticSchedule) {
+    size_t cpt = 0;
+    for (const auto& runnable : mStaticSchedule.at(mStaticScheduleStep)) {
         if (verbose)
-            printf("run: %s\n",
-                    (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
+            fmt::print("run: {}\n", namePtrTable[runnable]);
         else
-            drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50,
+            drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.at(mStaticScheduleStep).size()), 50,
-                            (std::string("running ") + runnable->type() + "_" +
-                                std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
+                            (std::string("running ") + namePtrTable[runnable]));
         const auto tStart = std::chrono::high_resolution_clock::now();
         runnable->forward();
         const auto tEnd = std::chrono::high_resolution_clock::now();
@@ -225,27 +468,42 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::ve
         cpt++;
     }
     if (!verbose) drawProgressBar(1.0, 50, "                                   ");
-    printf("\n");
+    fmt::print("\n");
+
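+    // Move on to the next static schedule step; wrap around so that repeated
+    // forward() calls cycle through the steps recorded by generateScheduling().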
+    ++mStaticScheduleStep;
+    if (mStaticScheduleStep == mStaticSchedule.size()) {
+        mStaticScheduleStep = 0;
+    }
 }
 
 void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const {
-    FILE* fp = std::fopen((fileName + ".mmd").c_str(), "w");
-    std::fprintf(fp, "gantt\ndateFormat x\naxisFormat %%Q ms\n\n");
+    auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((fileName + ".mmd").c_str(), "w"), &std::fclose);
+
+    if (!fp) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Could not create scheduling diagram log file: {}", fileName + ".mmd");
+    }
+
+    fmt::print(fp.get(), "gantt\ndateFormat x\naxisFormat %Q µs\n\n");
 
     if (!mScheduling.empty()) {
+        const std::map<std::shared_ptr<Node>, std::string> namePtrTable
+            = mGraphView->getRankedNodesName("{0} ({1}#{3})");
         const auto globalStart = mScheduling[0].start;
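+        // Each scheduling element becomes one Mermaid Gantt task of the form
+        // "<name> :<start>, <end>", with times in µs relative to globalStart.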
 
         for (const auto& element : mScheduling) {
-            std::fprintf(fp, "%s :%ld, %ld\n",
-                         (element.node->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(element.node.get())))
-                                 .c_str(),
+            auto name = namePtrTable.at(element.node);
+            // Mermaid does not allow : character in task title
+            std::replace(name.begin(), name.end(), ':', '_');
+
+            fmt::print(fp.get(), "{} :{}, {}\n",
+                         name,
                          std::chrono::duration_cast<std::chrono::microseconds>(element.start - globalStart).count(),
                          std::chrono::duration_cast<std::chrono::microseconds>(element.end - globalStart).count());
         }
     }
 
-    std::fprintf(fp, "\n");
-    std::fclose(fp);
+    fmt::print(fp.get(), "\n");
 }
 
 std::set<std::shared_ptr<Aidge::Node>> Aidge::SequentialScheduler::getConsumers(
@@ -254,8 +512,100 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::SequentialScheduler::getConsumers(
 
     for (const auto& producer : producers) {
         const auto& childs = producer->getChildren();
-        consumers.insert(childs.begin(), childs.end());
+        for (const auto& child : childs) {
+            // Do not schedule children outside the current graph!
+            if (mGraphView->inView(child)) {
+                consumers.insert(child);
+            }
+        }
     }
 
     return consumers;
 }
+
+Aidge::NbElts_t Aidge::SequentialScheduler::getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx) const {
+    const auto parent = node->inputs()[inputIdx];
+
+    if (parent.first) {
+        // Parent is connected, everything is fine!
+        return parent.first->getOperator()->getNbProducedData(parent.second);
+    }
+    else if (std::shared_ptr<Node> upperNode = mUpperNode.lock()) {
+        // We are inside an upper operator (for instance a MetaOperator)
+        // We need to connect the "local" producer-consumer model to the upper
+        // one, by mapping local node inputs to the upper node inputs.
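+        // e.g. for the micro-graph scheduler of a MetaOperator, the data
+        // available on a graph input is whatever the parents of the
+        // MetaOperator node itself have produced so far.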
+        IOIndex_t nodeInputIdx = 0;
+        for (const auto& input : mGraphView->getOrderedInputs()) {
+            if (input.first == node) {
+                // Current node is an input
+                const auto upperInput = upperNode->inputs()[nodeInputIdx];
+                if (upperInput.first) {
+                    return upperInput.first->getOperator()->getNbProducedData(upperInput.second);
+                } 
+            }
+            ++nodeInputIdx;
+        }
+    }
+
+    // Otherwise, two cases:
+    if (node->getOperator()->getRawInput(inputIdx)) {
+        // Input is not connected but a valid tensor exists
+        // => This means data was fed manually to the input, without a Producer.
+        // In this case, we assume single-use data (unlike a Producer, which
+        // keeps producing the data each time it is needed).
+        fmt::print("No producer node attached to input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
+        return std::static_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputIdx))->size();
+    }
+    else {
+        // Input is not connected, this is an error
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
+    }
+
+    return 0;
+}
+
+Aidge::SequentialScheduler::PriorProducersConsumers Aidge::SequentialScheduler::getPriorProducersConsumers(
+    const std::shared_ptr<Node>& node) const
+{
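+    // Walk up the graph from the given node: for every input that still lacks
+    // data, recursively collect the Producers that must be fired first and the
+    // prior consumers that must be scheduled before this node.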
+    PriorProducersConsumers prior;
+
+    IOIndex_t inputIdx = 0;
+    for (const auto& parent : node->inputs()) {
+        if (parent.first &&
+            (node->getOperator()->getNbConsumedData(inputIdx) + node->getOperator()->getNbRequiredData(inputIdx)) >
+                    parent.first->getOperator()->getNbProducedData(parent.second))
+        {
+            if (!mGraphView->inView(parent.first)) {
+                // Do not schedule prior outside the current graph!
+                return PriorProducersConsumers();
+            }
+
+            if (parent.first->type() == Producer_Op::Type) {
+                prior.requiredProducers.insert(parent.first);
+                prior.priorConsumers.insert(node);
+            }
+            else if (parent.first->type() == Memorize_Op::Type) {
+                // Break cycles
+                return PriorProducersConsumers();
+            }
+            else {
+                const auto& parentPrior = getPriorProducersConsumers(parent.first);
+
+                if (!parentPrior.isPrior) {
+                    return PriorProducersConsumers();
+                }
+                else {
+                    prior.requiredProducers.insert(parentPrior.requiredProducers.cbegin(), parentPrior.requiredProducers.cend());
+                    prior.priorConsumers.insert(parentPrior.priorConsumers.cbegin(), parentPrior.priorConsumers.cend());
+                }
+            }
+        }
+        ++inputIdx;
+    }
+
+    prior.isPrior = true;
+    if (prior.priorConsumers.empty()) {
+        prior.priorConsumers.insert(node);
+    }
+    return prior;
+}
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index 806f62d47dcad02614a18d0d7f6e51042b164cc8..9280d5fbdfd0a6a35724e5afd5caf672fefd8bf8 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -38,7 +38,7 @@ target_compile_options(tests${module_name} PUBLIC
     -fvisibility=hidden>)
 target_compile_options(tests${module_name} PRIVATE
 $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
--Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror> ${SANITIZE_FLAGS}>)
+-Wall -Wextra -Wold-style-cast -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror> ${SANITIZE_FLAGS}>)
 target_compile_options(tests${module_name} PRIVATE
 $<$<CXX_COMPILER_ID:GNU>:${STRICT_ALIASING_FLAGS}>)
 target_compile_options(${module_name} PRIVATE
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index ebbfb3ad89721eb4f1390c3efca475acbb0b6f46..437780b959b37e0cf6b5b7796e71c9b931f25bc0 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -74,7 +74,7 @@ TEST_CASE("genRandomGraph", "[GraphView][randomGen]") {
         }
     }
 
-    printf("nbUnicity = %zu/%zu\n", nbUnicity, nbTests);
+    fmt::print("nbUnicity = {}/{}\n", nbUnicity, nbTests);
 }
 
 TEST_CASE("clone", "[GraphView][clone]") {
@@ -147,7 +147,7 @@ TEST_CASE("clone_with_delete", "[GraphView][cloneDelete]") {
         ++seed;
     }
 
-    printf("nbClonedWithDelete = %zu/%zu\n", nbClonedWithDelete, nbTests);
+    fmt::print("nbClonedWithDelete = {}/{}\n", nbClonedWithDelete, nbTests);
 }
 
 TEST_CASE("remove", "[GraphView][remove]") {
@@ -205,7 +205,7 @@ TEST_CASE("remove", "[GraphView][remove]") {
         }
     }
 
-    printf("nbTested = %zu/%zu\n", nbTested, nbTests);
+    fmt::print("nbTested = {}/{}\n", nbTested, nbTests);
 }
 
 TEST_CASE("[core/graph] GraphView(Constructor)", "[GraphView][constructor()]") {
@@ -381,7 +381,7 @@ TEST_CASE("[core/graph] GraphView(save)") {
     g1->addChild(conv5, "c4", 0, 0);
 
     g1->save("./graphExample");
-    printf("File saved in ./graphExample.md\n");
+    fmt::print("File saved in ./graphExample.md\n");
 }
 
 TEST_CASE("[core/graph] GraphView(resetConnections)") {
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index 1330a8e620ae5d49d6ef61257a587b914ffed1cd..bcd6d0f4cd9ba32ee4318188343b7e6360670d3b 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -9,7 +9,7 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/GenericOperator.hpp"
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index 68e2d4d4d5b4fe1b40f83c087eb61c7865d3db75..3ff2a3c6c7422c1ead53a629670975a25e54f7d7 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -11,10 +11,12 @@
 
 #include <catch2/catch_test_macros.hpp>
 
+#include "aidge/operator/Pop.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Testing.hpp"
+#include "aidge/recipes/Recipes.hpp"
 #include <cstddef>
 
 using namespace Aidge;
@@ -51,4 +53,78 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator]") {
         //auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator())->getMicroGraphScheduler();
         //REQUIRE(microGraphScheduler->getStaticScheduling().size() == 2);
     }
+
+    SECTION("LSTM") {
+        auto myLSTM = LSTM(32, 64, 16, true, "ltsm");
+        auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
+
+        auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraph();
+        microGraph->save("lstm", false, false);
+
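+        // 19 inputs expected in total: one data input plus the weight and bias
+        // inputs, with the two initial state inputs last (#17 and #18 below).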
+        REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
+        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->nbOutputs() == 2);
+
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
+        myInput->resize({32});
+        std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>();
+        myInit->resize({1, 64});
+
+        op->associateInput(0, myInput);
+        op->associateInput(17, myInit);
+        op->associateInput(18, myInit);
+
+        op->computeOutputDims();
+        microGraph->save("lstm_dims", true, true);
+        REQUIRE(op->outputDimsForwarded());
+
+        //op->updateConsummerProducer();  // requires implementation
+        //auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraphScheduler();
+        //microGraphScheduler->saveSchedulingDiagram("lstm_scheduling");
+    }
+
+    SECTION("LSTM(expanded)") {
+        auto pop = Pop();
+        auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
+        auto myGraph = Sequential({pop, myLSTM});
+        auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
+
+        REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
+        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->nbOutputs() == 2);
+
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
+            Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}});
+        std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>(
+            Array2D<float, 3, 3>{{{0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}}});
+        std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>(
+            Array2D<float, 3, 2>{{{0.1, 0.1}, {0.1, 0.1}, {0.1, 0.1}}});
+        std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>(
+            Array2D<float, 3, 3>{{{0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}, {0.1, 0.1, 0.1}}});
+
+        pop->getOperator()->associateInput(0, myInput);
+        op->associateInput(17, myInit);
+        op->associateInput(18, myInit);
+
+        // Weights X
+        myLSTM->input(1).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(2).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(3).first->getOperator()->setOutput(0, myInitW);
+        myLSTM->input(4).first->getOperator()->setOutput(0, myInitW);
+        // Weights H
+        myLSTM->input(5).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(6).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(7).first->getOperator()->setOutput(0, myInitR);
+        myLSTM->input(8).first->getOperator()->setOutput(0, myInitR);
+
+        auto g = getConnectedGraphView(myLSTM);
+        g->save("lstm_before_expand", true, true);
+
+        expandMetaOps(g);
+        g->setRootNode(pop);
+        REQUIRE(g->getRootNode() == pop);
+        g->save("lstm_expanded", true, true);
+
+        REQUIRE(g->getNodes().size() == 41);
+    }
 }
diff --git a/unit_tests/recipies/Test_FuseMulAdd.cpp b/unit_tests/recipes/Test_FuseMulAdd.cpp
similarity index 95%
rename from unit_tests/recipies/Test_FuseMulAdd.cpp
rename to unit_tests/recipes/Test_FuseMulAdd.cpp
index d0875fe10078eb9d8e3a97e0703191b5697f3fda..4c6e3f9d563d2e74958e68f8876a49a8323f4403 100644
--- a/unit_tests/recipies/Test_FuseMulAdd.cpp
+++ b/unit_tests/recipes/Test_FuseMulAdd.cpp
@@ -18,12 +18,12 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 namespace Aidge {
 
 
-TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
+TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
     // generate the original GraphView
     auto matmul0 = MatMul("matmul0");
     auto add0 = Add(2, "add0");
diff --git a/unit_tests/recipies/Test_LabelGraph.cpp b/unit_tests/recipes/Test_LabelGraph.cpp
similarity index 99%
rename from unit_tests/recipies/Test_LabelGraph.cpp
rename to unit_tests/recipes/Test_LabelGraph.cpp
index e0ba9be6c80ef6109b59458bf52a23120efc7584..78f67d823a17454c1ecff40a2307556c990c4f53 100644
--- a/unit_tests/recipies/Test_LabelGraph.cpp
+++ b/unit_tests/recipes/Test_LabelGraph.cpp
@@ -11,7 +11,7 @@
 
 #include <catch2/catch_test_macros.hpp>
 
-#include "aidge/recipies/LabelGraph.hpp"
+#include "aidge/recipes/LabelGraph.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/MaxPooling.hpp"
diff --git a/unit_tests/recipies/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
similarity index 92%
rename from unit_tests/recipies/Test_removeFlatten.cpp
rename to unit_tests/recipes/Test_removeFlatten.cpp
index 8d0ff29dae19ba2dd8009441c39da53bf44378f0..6c805e4cfc64ad3dcfbf020e74926dd0aeca5f9f 100644
--- a/unit_tests/recipies/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -16,12 +16,12 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/FC.hpp"
-#include "aidge/recipies/Recipies.hpp"
+#include "aidge/recipes/Recipes.hpp"
 
 namespace Aidge {
 
 
-TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
+TEST_CASE("[cpu/recipes] RemoveFlatten", "[RemoveFlatten][recipes]") {
     // generate the original GraphView
     auto flatten = GenericOperator("Flatten", 1, 0, 1, "myFlatten");
     auto fc = FC(10, 50, "myFC");
diff --git a/unit_tests/scheduler/Test_MemoryManager.cpp b/unit_tests/scheduler/Test_MemoryManager.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a4941203644b7ba291682f3932926a36fa83b745
--- /dev/null
+++ b/unit_tests/scheduler/Test_MemoryManager.cpp
@@ -0,0 +1,599 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/scheduler/MemoryManager.hpp"
+
+using namespace Aidge;
+
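+// These tests exercise the MemoryManager clock model: each tick() advances a
+// virtual clock, and memSpace->allocated / memSpace->released record the tick
+// at which a memory space was allocated / released (-1 = not released yet).
+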
+TEST_CASE("allocate1", "[MemoryManager]") {
+    std::shared_ptr<Node> node1
+        = GenericOperator("Fictive", 0, 0, 0, "node1");
+    std::shared_ptr<Node> node2
+        = GenericOperator("Fictive", 0, 0, 0, "node2");
+    std::shared_ptr<Node> node3
+        = GenericOperator("Fictive", 0, 0, 0, "node3");
+    std::shared_ptr<Node> node4
+        = GenericOperator("Fictive", 0, 0, 0, "node4");
+
+    MemoryManager memManager;
+    memManager.allocate(node1, 1024, {node2});
+
+    REQUIRE(memManager.getPeakUsage() == 1024);
+    REQUIRE(memManager.getPlanes(node1).size() == 1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->size == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node2}));
+    REQUIRE(memManager.getPlanes(node1).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node1).back().size == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().getLimit() == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().count == 1);
+    REQUIRE(memManager.getPlanes(node1).back().length == 1);
+    REQUIRE(memManager.getPlanes(node1).back().stride == 1024);
+
+    memManager.releaseDependencies(node1);
+
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node2}));
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == -1);
+
+    memManager.tick();
+    memManager.release(node1);
+
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == 1);
+
+    memManager.allocate(node2, 2048, {node3});
+
+    REQUIRE(memManager.getPeakUsage() == 1024 + 2048);
+    REQUIRE(memManager.getPlanes(node2).size() == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->offset == 1024);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node2).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().count == 1);
+    REQUIRE(memManager.getPlanes(node2).back().length == 1);
+    REQUIRE(memManager.getPlanes(node2).back().stride == 2048);
+
+    memManager.releaseDependencies(node2);
+
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies.empty());
+
+    memManager.tick();
+    memManager.release(node2);
+
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == 2);
+
+    const std::vector<MemoryManager::MemoryPlane>& memPlanes
+        = memManager.getPlanes(node2);
+
+    REQUIRE(memPlanes.size() == 1);
+
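+    // Reallocate node2's memSpace for node3 with a 512-byte offset: the space
+    // grows to 512 + 2048 bytes and node3's plane sits at offset 512 inside it.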
+    memManager.reallocate(memPlanes.back().memSpace,
+                          node3, 512, 2048, false, 0, {node4});
+
+    REQUIRE(memManager.getPeakUsage() == 1024 + 2048 + 512);
+    REQUIRE(memManager.getPlanes(node3).size() == 1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->offset == 1024);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->size == 2048 + 512);
+    REQUIRE(memManager.getPlanes(node3).back().offset == 512);
+    REQUIRE(memManager.getPlanes(node3).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node3).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node3).back().count == 1);
+    REQUIRE(memManager.getPlanes(node3).back().length == 1);
+    REQUIRE(memManager.getPlanes(node3).back().stride == 2048);
+
+    memManager.releaseDependencies(node3);
+    memManager.tick();
+    memManager.release(node3);
+
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == 3);
+
+    memManager.allocate(node4, 1024);
+
+    REQUIRE(memManager.getPeakUsage() == 1024 + 2048 + 512);
+    REQUIRE(memManager.getPlanes(node4).size() == 1);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 3);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->size == 1024);
+    REQUIRE(memManager.getPlanes(node4).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node4).back().size == 1024);
+    REQUIRE(memManager.getPlanes(node4).back().getLimit() == 1024);
+    REQUIRE(memManager.getPlanes(node4).back().count == 1);
+    REQUIRE(memManager.getPlanes(node4).back().length == 1);
+    REQUIRE(memManager.getPlanes(node4).back().stride == 1024);
+
+    memManager.releaseDependencies(node4);
+    memManager.tick();
+    memManager.release(node4);
+
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 3);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->released == 4);
+
+    memManager.log("MemoryManager_allocate1.log");
+}
+
+TEST_CASE("allocate2", "[MemoryManager]") {
+    std::shared_ptr<Node> node1
+        = GenericOperator("Fictive", 0, 0, 0, "node1");
+    std::shared_ptr<Node> node2
+        = GenericOperator("Fictive", 0, 0, 0, "node2");
+    std::shared_ptr<Node> node3
+        = GenericOperator("Fictive", 0, 0, 0, "node3");
+    std::shared_ptr<Node> node4
+        = GenericOperator("Fictive", 0, 0, 0, "node4");
+
+    MemoryManager memManager;
+    memManager.allocate(node1, 1024, {node2});
+
+    REQUIRE(memManager.getPeakUsage() == 1024);
+    REQUIRE(memManager.getPlanes(node1).size() == 1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->size == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node2}));
+    REQUIRE(memManager.getPlanes(node1).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node1).back().size == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().getLimit() == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().count == 1);
+    REQUIRE(memManager.getPlanes(node1).back().length == 1);
+    REQUIRE(memManager.getPlanes(node1).back().stride == 1024);
+
+    memManager.releaseDependencies(node1);
+
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node2}));
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == -1);
+
+    memManager.tick();
+    memManager.release(node1);
+
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == 1);
+
+    memManager.allocate(node2, 2048, {node3});
+
+    REQUIRE(memManager.getPeakUsage() == 1024 + 2048);
+    REQUIRE(memManager.getPlanes(node2).size() == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->offset == 1024);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node2).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().count == 1);
+    REQUIRE(memManager.getPlanes(node2).back().length == 1);
+    REQUIRE(memManager.getPlanes(node2).back().stride == 2048);
+
+    memManager.releaseDependencies(node2);
+
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies.empty());
+
+    memManager.tick();
+    memManager.release(node2);
+
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == 2);
+
+    const std::vector<MemoryManager::MemoryPlane>& memPlanes
+        = memManager.getPlanes(node1);
+
+    REQUIRE(memPlanes.size() == 1);
+
+    memManager.reallocate(memPlanes.back().memSpace,
+                          node3, 512, 2048, false, 0, {node4});
+
+    REQUIRE(memManager.getPeakUsage() == 2048 + 2048 + 512);
+    REQUIRE(memManager.getPlanes(node3).size() == 1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->size == 2048 + 512);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node4}));
+    REQUIRE(memManager.getPlanes(node3).back().offset == 512);
+    REQUIRE(memManager.getPlanes(node3).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node3).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node3).back().count == 1);
+    REQUIRE(memManager.getPlanes(node3).back().length == 1);
+    REQUIRE(memManager.getPlanes(node3).back().stride == 2048);
+
+    // node2 memSpace should have moved
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == 2);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->offset == 2048 + 512);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node2).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().count == 1);
+    REQUIRE(memManager.getPlanes(node2).back().length == 1);
+    REQUIRE(memManager.getPlanes(node2).back().stride == 2048);
+
+    memManager.releaseDependencies(node3);
+
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node4}));
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies.empty());
+
+    memManager.tick();
+    memManager.release(node3);
+
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == 3);
+
+    memManager.allocate(node4, 1024);
+
+    REQUIRE(memManager.getPeakUsage() == 2048 + 2048 + 512);
+    REQUIRE(memManager.getPlanes(node4).size() == 1);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 3);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->offset == 2048 + 512);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->size == 1024);
+    REQUIRE(memManager.getPlanes(node4).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node4).back().size == 1024);
+    REQUIRE(memManager.getPlanes(node4).back().getLimit() == 1024);
+    REQUIRE(memManager.getPlanes(node4).back().count == 1);
+    REQUIRE(memManager.getPlanes(node4).back().length == 1);
+    REQUIRE(memManager.getPlanes(node4).back().stride == 1024);
+
+    memManager.releaseDependencies(node4);
+    memManager.tick();
+    memManager.release(node4);
+
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 3);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->released == 4);
+
+    memManager.log("MemoryManager_allocate2.log");
+}
+
+TEST_CASE("allocate3", "[MemoryManager]") {
+    std::shared_ptr<Node> node1
+        = GenericOperator("Fictive", 0, 0, 0, "node1");
+    std::shared_ptr<Node> node2
+        = GenericOperator("Fictive", 0, 0, 0, "node2");
+    std::shared_ptr<Node> node3
+        = GenericOperator("Fictive", 0, 0, 0, "node3");
+    std::shared_ptr<Node> node4
+        = GenericOperator("Fictive", 0, 0, 0, "node4");
+
+    MemoryManager memManager;
+    memManager.allocate(node1, 1024, {node2});
+
+    REQUIRE(memManager.getPeakUsage() == 1024);
+    REQUIRE(memManager.getPlanes(node1).size() == 1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->size == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node2}));
+    REQUIRE(memManager.getPlanes(node1).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node1).back().size == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().getLimit() == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().count == 1);
+    REQUIRE(memManager.getPlanes(node1).back().length == 1);
+    REQUIRE(memManager.getPlanes(node1).back().stride == 1024);
+
+    memManager.releaseDependencies(node1);
+
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node2}));
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == -1);
+
+    memManager.tick();
+    memManager.release(node1);
+
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == 1);
+
+    memManager.allocate(node2, 2048, {node3});
+
+    REQUIRE(memManager.getPeakUsage() == 1024 + 2048);
+    REQUIRE(memManager.getPlanes(node2).size() == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->offset == 1024);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node2).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().count == 1);
+    REQUIRE(memManager.getPlanes(node2).back().length == 1);
+    REQUIRE(memManager.getPlanes(node2).back().stride == 2048);
+
+    memManager.releaseDependencies(node2);
+
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies.empty());
+
+    memManager.tick();
+    memManager.release(node2);
+
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == 2);
+
+    const std::vector<MemoryManager::MemoryPlane>& memPlanes
+        = memManager.getPlanes(node1);
+
+    REQUIRE(memPlanes.size() == 1);
+
+    memManager.reallocate(memPlanes.back().memSpace,
+                          node3, 512, 2048, false);
+
+    REQUIRE(memManager.getPeakUsage() == 2048 + 2048 + 512);
+    REQUIRE(memManager.getPlanes(node3).size() == 1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->size == 2048 + 512);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->dependencies.empty());
+    REQUIRE(memManager.getPlanes(node3).back().offset == 512);
+    REQUIRE(memManager.getPlanes(node3).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node3).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node3).back().count == 1);
+    REQUIRE(memManager.getPlanes(node3).back().length == 1);
+    REQUIRE(memManager.getPlanes(node3).back().stride == 2048);
+
+    // node2 memSpace should have moved
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == 2);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->offset == 2048 + 512);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node2).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().count == 1);
+    REQUIRE(memManager.getPlanes(node2).back().length == 1);
+    REQUIRE(memManager.getPlanes(node2).back().stride == 2048);
+
+    memManager.releaseDependencies(node3);
+
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->dependencies.empty());
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies.empty());
+
+    memManager.tick();
+    memManager.release(node3);
+
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == 3);
+
+    memManager.reallocate(memPlanes.back().memSpace,
+                          node4, 256, 1024, false);
+
+    REQUIRE(memManager.getPeakUsage() == 2048 + 2048 + 512);
+    REQUIRE(memManager.getPlanes(node4).size() == 1);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->size == 2048 + 512);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->dependencies.empty());
+    REQUIRE(memManager.getPlanes(node4).back().offset == 256);
+    REQUIRE(memManager.getPlanes(node4).back().size == 1024);
+    REQUIRE(memManager.getPlanes(node4).back().getLimit() == 2048 + 256);
+    REQUIRE(memManager.getPlanes(node4).back().count == 1);
+    REQUIRE(memManager.getPlanes(node4).back().length == 1);
+    REQUIRE(memManager.getPlanes(node4).back().stride == 1024);
+
+    // node2 memSpace should not have moved
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == 2);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->offset == 2048 + 512);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node2).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().count == 1);
+    REQUIRE(memManager.getPlanes(node2).back().length == 1);
+    REQUIRE(memManager.getPlanes(node2).back().stride == 2048);
+
+    memManager.releaseDependencies(node4);
+    memManager.tick();
+    memManager.release(node4);
+
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->released == 4);
+
+    memManager.log("MemoryManager_allocate3.log");
+}
+
+TEST_CASE("allocate3_wrapAround", "[MemoryManager]") {
+    std::shared_ptr<Node> node1
+        = GenericOperator("Fictive", 0, 0, 0, "node1");
+    std::shared_ptr<Node> node2
+        = GenericOperator("Fictive", 0, 0, 0, "node2");
+    std::shared_ptr<Node> node3
+        = GenericOperator("Fictive", 0, 0, 0, "node3");
+    std::shared_ptr<Node> node4
+        = GenericOperator("Fictive", 0, 0, 0, "node4");
+
+    MemoryManager memManager;
+    memManager.allocate(node1, 1024, {node2});
+
+    REQUIRE(memManager.getPeakUsage() == 1024);
+    REQUIRE(memManager.getPlanes(node1).size() == 1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->size == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node2}));
+    REQUIRE(memManager.getPlanes(node1).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node1).back().size == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().getLimit() == 1024);
+    REQUIRE(memManager.getPlanes(node1).back().count == 1);
+    REQUIRE(memManager.getPlanes(node1).back().length == 1);
+    REQUIRE(memManager.getPlanes(node1).back().stride == 1024);
+
+    memManager.releaseDependencies(node1);
+
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node2}));
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == -1);
+
+    memManager.tick();
+    memManager.release(node1);
+
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->released == 1);
+
+    memManager.allocate(node2, 2048, {node3});
+
+    REQUIRE(memManager.getPeakUsage() == 1024 + 2048);
+    REQUIRE(memManager.getPlanes(node2).size() == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->offset == 1024);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node2).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().count == 1);
+    REQUIRE(memManager.getPlanes(node2).back().length == 1);
+    REQUIRE(memManager.getPlanes(node2).back().stride == 2048);
+
+    memManager.releaseDependencies(node2);
+
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node1).back().memSpace->dependencies.empty());
+
+    memManager.tick();
+    memManager.release(node2);
+
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == 2);
+
+    const std::vector<MemoryManager::MemoryPlane>& memPlanes
+        = memManager.getPlanes(node1);
+
+    REQUIRE(memPlanes.size() == 1);
+
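+    // With wrapAround enabled, the plane can wrap past the end of its memSpace:
+    // the space is resized to 2048 bytes instead of 512 + 2048, and the
+    // contiguous limit from offset 512 is 2048 - 512 bytes.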
+    memManager.reallocate(memPlanes.back().memSpace,
+                          node3, 512, 2048, true);
+
+    REQUIRE(memManager.getPeakUsage() == 2048 + 2048);
+    REQUIRE(memManager.getPlanes(node3).size() == 1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->dependencies.empty());
+    REQUIRE(memManager.getPlanes(node3).back().offset == 512);
+    REQUIRE(memManager.getPlanes(node3).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node3).back().getLimit() == 2048 - 512);
+    REQUIRE(memManager.getPlanes(node3).back().count == 1);
+    REQUIRE(memManager.getPlanes(node3).back().length == 1);
+    REQUIRE(memManager.getPlanes(node3).back().stride == 2048);
+
+    // node2's memSpace should have moved from offset 1024 to 2048,
+    // pushed back by node1's memSpace growing to 2048 bytes
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == 2);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->offset == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies ==
+        std::set<std::shared_ptr<Node> >({node3}));
+    REQUIRE(memManager.getPlanes(node2).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node2).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().count == 1);
+    REQUIRE(memManager.getPlanes(node2).back().length == 1);
+    REQUIRE(memManager.getPlanes(node2).back().stride == 2048);
+
+    memManager.releaseDependencies(node3);
+
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->dependencies.empty());
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->dependencies.empty());
+
+    memManager.tick();
+    memManager.release(node3);
+
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node3).back().memSpace->released == 3);
+
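+    // Map node4 at offset 1024 with size 1792 in the same memSpace: it also
+    // wraps around, but fits within the existing 2048 bytes, so the memSpace
+    // is not resized and peak usage is unchanged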
+    memManager.reallocate(memPlanes.back().memSpace,
+                          node4, 1024, 1792, true);
+
+    REQUIRE(memManager.getPeakUsage() == 2048 + 2048);
+    REQUIRE(memManager.getPlanes(node4).size() == 1);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->released == -1);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->offset == 0);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->dependencies.empty());
+    REQUIRE(memManager.getPlanes(node4).back().offset == 1024);
+    REQUIRE(memManager.getPlanes(node4).back().size == 1792);
+    REQUIRE(memManager.getPlanes(node4).back().getLimit() == 2048 - 1024);
+    REQUIRE(memManager.getPlanes(node4).back().count == 1);
+    REQUIRE(memManager.getPlanes(node4).back().length == 1);
+    REQUIRE(memManager.getPlanes(node4).back().stride == 1792);
+
+    // node2's memSpace should not have moved this time, since node1's memSpace was not resized
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->allocated == 1);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->released == 2);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->offset == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().memSpace->size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().offset == 0);
+    REQUIRE(memManager.getPlanes(node2).back().size == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().getLimit() == 2048);
+    REQUIRE(memManager.getPlanes(node2).back().count == 1);
+    REQUIRE(memManager.getPlanes(node2).back().length == 1);
+    REQUIRE(memManager.getPlanes(node2).back().stride == 2048);
+
+    memManager.releaseDependencies(node4);
+    memManager.tick();
+    memManager.release(node4);
+
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->allocated == 0);
+    REQUIRE(memManager.getPlanes(node4).back().memSpace->released == 4);
+
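+    // Write the resulting memory mapping to a log file for inspection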
+    memManager.log("MemoryManager_allocate3_wrapAround.log");
+}
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7e28f1fadc56855d266c1e8547261f5903f8c724
--- /dev/null
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -0,0 +1,77 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm> // std::transform
+#include <cstddef>   // std::size_t
+#include <map>
+#include <memory>
+#include <random>    // std::random_device, std::mt19937
+#include <set>
+#include <string>
+#include <vector>
+
+#include <fmt/ranges.h> // fmt::print on containers such as std::vector
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Testing.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/scheduler/Scheduler.hpp"
+
+using namespace Aidge;
+
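+// Build random acyclic graphs and check that sequential scheduling produces
+// exactly one step per node (10 generated nodes plus one Producer per input)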
+TEST_CASE("randomScheduling", "[Scheduler][randomGen]") {
+    const std::size_t nbTests = 100;
+    std::size_t nbUnicity = 0;
+
+    for (std::size_t test = 0; test < nbTests; ++test) {
+        std::random_device rd;
+        const std::mt19937::result_type seed(rd());
+
+        RandomGraph randGraph;
+        randGraph.acyclic = true;
+        const auto g1 = std::make_shared<GraphView>("g1");
+        const bool unicity1 = g1->add(randGraph.gen(seed, 10));
+
+        if (unicity1) {
+            for (auto& node : g1->getNodes()) {
+                std::static_pointer_cast<GenericOperator_Op>(node->getOperator())->setComputeOutputDims(GenericOperator_Op::InputIdentity(0, node->nbOutputs()));
+            }
+
+            const auto orderedInputs = g1->getOrderedInputs();
+            for (const auto& input : orderedInputs) {
+                auto prod = Producer({16, 32});
+                prod->addChild(input.first, 0, input.second);
+                g1->add(prod);
+            }
+
+            g1->save("schedule");
+            g1->forwardDims();
+
+            auto scheduler = SequentialScheduler(g1);
+            scheduler.generateScheduling(true);
+            const auto sch = scheduler.getStaticScheduling();
+
+            const auto namePtrTable = g1->getRankedNodesName("{0} ({1}#{3})");
+
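+            // Map each scheduled node to its unique ranked name for display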
+            std::vector<std::string> nodesName;
+            std::transform(sch.begin(), sch.end(),
+                std::back_inserter(nodesName),
+                [&namePtrTable](auto val){ return namePtrTable.at(val); });
+
+            fmt::print("schedule: {}\n", nodesName);
+            REQUIRE(sch.size() == 10 + orderedInputs.size());
+            ++nbUnicity;
+        }
+    }
+
+    fmt::print("nbUnicity = {}/{}\n", nbUnicity, nbTests);
+}