diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index ead6c19fa5fe1e91ec1c24cf8dfee6146390477f..3dbf54a5fa58be40b08f58d760f3991586203825 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -554,16 +554,11 @@ public:
     inline void print() const { fmt::print("{}\n", toString()); }
 
     std::shared_ptr<Tensor> grad() {
-        // if (!mGrad && mImpl) {
-        //     mGrad = std::make_shared<Tensor>(mDims);
-        //     mGrad->setDataType(mDataType);
-        //     mGrad->setBackend(mImpl->backend());
-
-        //     // if (mImpl) mGrad->setBackend(mImpl->backend());
-        // }
-
         return mGrad;
     }
+    void setGrad(std::shared_ptr<Tensor> newGrad) {
+        mGrad = newGrad;
+    }
 
     /**
      * @brief Associate the gradient with a Tensor instance and set its implementation
@@ -574,7 +569,7 @@ public:
      * @note If Tensor instance and implementation already existed for the gradient
      * nothing is done.
      */
-    void initGradient() {
+    void initGrad() {
         if (!mGrad) {
             mGrad = std::make_shared<Tensor>(mDims);
         }
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 6a0460941940f33a3be33bc9edbf84da32777730..c9a4c11d780a41a1620518047d66a7de2d7b55fa 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -210,7 +210,7 @@ public:
      * @brief Compute dimensions of input/output Tensors for each Operator of the
      * GraphView object's Nodes.
      */
-    bool forwardDims(const std::vector<std::vector<DimSize_t>> dims = {}, bool allowDataDependency = false);
+    bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
@@ -486,6 +486,14 @@ public:
      */
     IOIndex_t getNbFreeDataInputs() const;
 
+    /**
+     * @brief Force update of GraphView inputs/outputs.
+     * It may be necessary to force the update of GraphView inputs/outputs when
+     * connections are added or removed inside the GraphView **after** the nodes
+     * were added.
+     */
+    void updateInputsOutputs();
+
 private:
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index c677da0f2e34a299ddec6ee85f5a84616206193d..a411101618a5f4acaf070516d67691a6b55e3ff5 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -70,16 +70,9 @@ public:
         return mScheduler;
     }
 
-    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
-        AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
-        AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
-
-        const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
-        inputOp.first->getOperator()->associateInput(inputOp.second, data);
-
-        // Associate inputs for custom implementation
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+    void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
 
     bool forwardDims(bool allowDataDependency = false) override final {
         // Check first that all required inputs are available, otherwise
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 6086c5145eb39cee081468ba91473dc983cfa35f..a493793278d42904d8a62e31571720f94ff1655d 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -56,8 +56,8 @@ public:
     ///////////////////////////////////////////////////
     // Tensor access
     // input management
-    void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
-    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
+    void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override;
+    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override;
     const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
 
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 7e9072857dae8fa3137065e5c47cc11d88d37efe..23825079673129ea08aa7da40b21a8cc921d6ba0 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -105,7 +105,7 @@ public:
     void forward() override final;
 
     void backward() override final {
-        fmt::print("Basic Producer backward() function.\n");
+        // fmt::print("Basic Producer backward() function.\n");
     }
     void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
         if (getAttr<ProdAttr::Constant>()) {
diff --git a/include/aidge/scheduler/ParallelScheduler.hpp b/include/aidge/scheduler/ParallelScheduler.hpp
index 0b6f963d61bf0079a9a32bd335ba765788aba2a5..abacebf4e0c45130bb0e41872577052cfe0a176c 100644
--- a/include/aidge/scheduler/ParallelScheduler.hpp
+++ b/include/aidge/scheduler/ParallelScheduler.hpp
@@ -37,7 +37,7 @@ public:
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    virtual void forward(bool forwardDims = true, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
+    virtual void forward(bool forwardDims = true, const std::vector<std::shared_ptr<Aidge::Tensor>>& data = {});
 };
 } // namespace Aidge
 
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 2f8fbb7aeb6562e0dd309f8f53def6d0fed5a08a..792d73693be0780f2e938d828b0f29889216631b 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -114,7 +114,7 @@ public:
      *
      * @param data data input tensors
      */
-    void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data);
+    void connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data);
 
     /**
      * @brief Save in a Markdown file the static scheduling with early and late relative order for the nodes.
diff --git a/include/aidge/scheduler/SequentialScheduler.hpp b/include/aidge/scheduler/SequentialScheduler.hpp
index 9cf0c2c1877bbbe5930c6b1e39f2a46c33e21d93..a7929fde8a2affdd562d70d11a7c809aaf3357d0 100644
--- a/include/aidge/scheduler/SequentialScheduler.hpp
+++ b/include/aidge/scheduler/SequentialScheduler.hpp
@@ -49,12 +49,12 @@ public:
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    virtual void forward(bool forwardDims = true, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
+    virtual void forward(bool forwardDims = true, const std::vector<std::shared_ptr<Aidge::Tensor>>& data = {});
 
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instantiateGrad = true);
+    void backward(bool instantiateGrad = true);
 
 private:
     SchedulingPolicy mSchedulingPolicy;
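
With the new signature, `backward()` no longer receives the target gradients as an argument: the gradient of each graph output is expected to be attached to the corresponding output tensor beforehand. A minimal Python sketch of the intended sequence, using the binding names introduced elsewhere in this patch (`set_grad`, `backward`); the scheduler, output tensor and gradient tensor are assumed to already exist:

```python
import aidge_core

def run_backward(scheduler: aidge_core.SequentialScheduler,
                 output_tensor: aidge_core.Tensor,
                 grad_tensor: aidge_core.Tensor) -> None:
    # Seed the output gradient explicitly; this replaces the old `data`
    # argument of backward().
    output_tensor.set_grad(grad_tensor)
    # instanciate_grad=True creates any missing gradient tensors
    # (via compile_gradient) before running the backward pass.
    scheduler.backward(instanciate_grad=True)
```
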
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 44c3b1f5e8df833344fa9b7fe72bdb4ef1e0ec12..113377b33d9827c3428eeb0adc92111f75c22abb 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -21,6 +21,7 @@
 
 #include "aidge/utils/future_std/any.hpp"
 #include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -86,7 +87,7 @@ public:
     template<class T> void addAttr(const std::string& name, const T& value)
     {
         const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
-        assert(res.second && "attribute already exists");
+        AIDGE_ASSERT(res.second, "attribute already exists");
 
 #ifdef PYBIND
         // We cannot handle Python object if the Python interpreter is not running
@@ -129,10 +130,10 @@ public:
     void addAttrPy(const std::string& name, py::object&& value)
     {
         auto it = mAttrs.find(name);
-        assert(it == mAttrs.end() && "attribute already exists");
+        AIDGE_ASSERT(it == mAttrs.end(), "attribute already exists");
 
         const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
-        assert(res.second && "attribute already exists");
+        AIDGE_ASSERT(res.second, "attribute already exists");
     }
 
     void setAttrPy(const std::string& name, py::object&& value) override final
@@ -199,6 +200,8 @@ public:
     };
 #endif
 
+    virtual ~DynamicAttributes() {}
+
 private:
 #ifdef PYBIND
     // Stores C++ attributes (copy) and Python-only attributes
diff --git a/python_binding/data/pybind_Database.cpp b/python_binding/data/pybind_Database.cpp
index 903e692ca3d14d6ae25f0d6f151b1b08d557d924..4bc28a19d350236933c3b6c139e9e3a4d980fa3f 100644
--- a/python_binding/data/pybind_Database.cpp
+++ b/python_binding/data/pybind_Database.cpp
@@ -1,13 +1,40 @@
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
 #include "aidge/data/Database.hpp"
+#include "aidge/data/Tensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
-void init_Database(py::module& m){
+/**
+ * @brief Trampoline class allowing Python classes to inherit from Database
+ * and override its virtual methods (getItem, getLen, getNbModalities).
+ */
+class pyDatabase : public Database {
+   public:
+    using Database::Database;  // Inherit constructors
 
-    py::class_<Database, std::shared_ptr<Database>>(m,"Database");
+    std::vector<std::shared_ptr<Tensor>> getItem(
+        const std::size_t index) const override {
+        PYBIND11_OVERRIDE_PURE_NAME(std::vector<std::shared_ptr<Tensor>>, Database,
+                               "get_item", getItem, index);
+    }
+    std::size_t getLen() const noexcept override {
+        PYBIND11_OVERRIDE_PURE_NAME(std::size_t, Database, "len", getLen);
+    }
+    std::size_t getNbModalities() const noexcept override {
+        PYBIND11_OVERRIDE_PURE_NAME(std::size_t, Database, "get_nb_modalities",
+                               getNbModalities);
+    }
+};
 
-    
-}
+void init_Database(py::module& m) {
+    py::class_<Database, std::shared_ptr<Database>, pyDatabase>(
+        m, "Database", py::dynamic_attr())
+        .def(py::init<>())
+        .def("get_item", &Database::getItem)
+        .def("len", &Database::getLen)
+        .def("get_nb_modalities", &Database::getNbModalities);
 }
+}  // namespace Aidge
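
With the trampoline in place, a dataset can be written directly in Python by overriding the three bound methods. A minimal sketch, assuming the items are pre-built `aidge_core.Tensor` objects:

```python
import aidge_core

class InMemoryDatabase(aidge_core.Database):
    """Toy single-modality dataset backed by a Python list (illustrative only)."""

    def __init__(self, tensors):
        super().__init__()        # initialize the C++ Database base class
        self._tensors = tensors   # list of aidge_core.Tensor

    def get_item(self, index):
        # One modality per item, returned as a list of tensors
        return [self._tensors[index]]

    def len(self):
        return len(self._tensors)

    def get_nb_modalities(self):
        return 1
```
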
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index b97af94ad583cf42e25fa3afc0697021f6dcadcc..3c2120565e1637697e5258723b1b366a520fdf80 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -77,7 +77,9 @@ void init_Tensor(py::module& m){
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
     .def("grad", &Tensor::grad)
+    .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
+    .def("init_grad", &Tensor::initGrad)
     .def("size", &Tensor::size)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
     .def("has_impl", &Tensor::hasImpl)
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index f12ab25bf60fb32fb3b91a59997007fd2e266e5d..7b38c2d72d5f4b2eed8d8bbf9f41f47144b51060 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -73,6 +73,7 @@ void init_Recipes(py::module&);
 void init_GraphViewHelper(py::module&);
 
 void init_Scheduler(py::module&);
+void init_MemoryManager(py::module&);
 void init_TensorUtils(py::module&);
 void init_Filler(py::module&);
 
@@ -136,6 +137,7 @@ void init_Aidge(py::module& m) {
     init_Recipes(m);
     init_GraphViewHelper(m);
     init_Scheduler(m);
+    init_MemoryManager(m);
     init_TensorUtils(m);
     init_Filler(m);
 }
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp
index ac56fb4b43eb5b0a737157ec9e64c6771a692816..e65b790d3eba6072e3e1b112c7d841959d4a5672 100644
--- a/python_binding/recipes/pybind_GraphViewHelper.cpp
+++ b/python_binding/recipes/pybind_GraphViewHelper.cpp
@@ -24,5 +24,6 @@ namespace py = pybind11;
 namespace Aidge {
 void init_GraphViewHelper(py::module &m) {
     m.def("producers", &producers, py::arg("graphview"));
+    m.def("compile_gradient", &compile_gradient, py::arg("graphview"));
 }
 } // namespace Aidge
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index f122c411618ce28a641fd46ee568f99cc48e9f58..b85d1c41ed90a5774a9b24062dfda4186c2294d5 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -21,66 +21,70 @@
 namespace py = pybind11;
 
 namespace Aidge {
-void init_Recipes(py::module &m) {
+void init_Recipes(py::module &m)
+{
 
 
   m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
-    Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+    Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
-    :param graph_view: Graph view on which we want to apply the recipie
+    :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
   // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
-  //   Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+  //   Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
   //   :param nodes: The MatMul and Add nodes to fuse.
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
   m.def("remove_dropout",static_cast<void(*)(std::shared_ptr<GraphView>)>(removeDropout), py::arg("graph_view"), R"mydelimiter(
-    Recipie to remove a dropout operator.
+    Recipe to remove a dropout operator.
 
-    :param graph_view: Graph view on which we want to apply the recipie
+    :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
   m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
-    Recipie to remove a flatten operator.
+    Recipe to remove a flatten operator.
 
-    :param graph_view: Graph view on which we want to apply the recipie
+    :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
   // m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
-  //   Recipie to remove a flatten operator.
+  //   Recipe to remove a flatten operator.
 
   //   :param nodes: The flatten operator to remove.
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
   // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
-  //   Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+  //   Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
   //   :param nodes: The MatMul and Add nodes to fuse.
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
   m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
-    Recipie to remove a flatten operator.
+    Recipe to fuse a BatchNorm operator into the preceding operator (typically a convolution).
 
-    :param graph_view: Graph view on which we want to apply the recipie
+    :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
- m.def("get_conv_horizontal_tiling", static_cast<std::set<std::shared_ptr<Node>>(*)(const std::shared_ptr<Node>&, const DimIdx_t, const std::size_t)>(getConvHorizontalTiling),
+  m.def("get_conv_horizontal_tiling", static_cast<std::set<std::shared_ptr<Node>>(*)(const std::shared_ptr<Node>&, const DimIdx_t, const std::size_t)>(getConvHorizontalTiling),
         py::arg("node"), py::arg("axis"), py::arg("nb_slices"));
 
   // m.def("fuse_batchnorm", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseBatchNorm), py::arg("nodes"), R"mydelimiter(
-  //   Recipie to remove a flatten operator.
+  //   Recipe to remove a flatten operator.
 
   //   :param nodes: The flatten operator to remove.
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
+
+  m.def("expand_metaops", static_cast<void(*)(std::shared_ptr<GraphView>, bool)>(expandMetaOps), py::arg("graph_view"), py::arg("recursive") = false);
 }
+
 } // namespace Aidge
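
A short usage sketch for the newly bound `expand_metaops` recipe; the graph argument is assumed to be an existing `aidge_core.GraphView` containing meta-operators:

```python
import aidge_core

def flatten_meta_operators(graph_view: aidge_core.GraphView) -> None:
    # Replace every MetaOperator node by its inner GraphView.
    # recursive=True also expands meta-operators nested inside other meta-operators.
    aidge_core.expand_metaops(graph_view, recursive=True)
```
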
diff --git a/python_binding/scheduler/pybind_MemoryManager.cpp b/python_binding/scheduler/pybind_MemoryManager.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0f18db405bec0aee9637f2e5f2ecc7b71e502cc5
--- /dev/null
+++ b/python_binding/scheduler/pybind_MemoryManager.cpp
@@ -0,0 +1,108 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "aidge/scheduler/MemoryManager.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+void init_MemoryManager(py::module& m)
+{
+    py::enum_<MemoryManager::OptimizeStrategy>(m, "OptimizeStrategy")
+        .value("None", MemoryManager::OptimizeStrategy::None)
+        .value("OptimizeMaxLifetimeMinSizeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxLifetimeMinSizeFirst)
+        .value("OptimizeMaxLifetimeMaxSizeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxLifetimeMaxSizeFirst)
+        .value("OptimizeMaxHoleMaxLifetimeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxHoleMaxLifetimeFirst)
+        .export_values();
+
+    py::class_<MemoryManager::MemorySpace, std::shared_ptr<MemoryManager::MemorySpace>>(m, "MemorySpace")
+        .def(py::init<MemoryManager::Clock_T, unsigned int, unsigned int, std::set<std::shared_ptr<Node>> >(), py::arg("clock"), py::arg("offset"), py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>())
+        .def_readwrite("offset", &MemoryManager::MemorySpace::offset)
+        .def_readwrite("size", &MemoryManager::MemorySpace::size)
+        .def_readwrite("dependencies", &MemoryManager::MemorySpace::dependencies)
+        .def_readwrite("allocated", &MemoryManager::MemorySpace::allocated)
+        .def_readwrite("released", &MemoryManager::MemorySpace::released);
+
+    py::class_<MemoryManager::MemoryPlane, std::shared_ptr<MemoryManager::MemoryPlane>>(m, "MemoryPlane")
+        .def(py::init<std::shared_ptr<MemoryManager::MemorySpace>, 
+                      MemoryManager::Clock_T, unsigned int, unsigned int,
+                      unsigned int, unsigned int, unsigned int>(),
+                      py::arg("mem_space"), py::arg("clock"), py::arg("offset"), 
+                      py::arg("size"), py::arg("stride"), py::arg("length"), py::arg("count"))
+        .def_readwrite("mem_space", &MemoryManager::MemoryPlane::memSpace)
+        .def_readwrite("allocated", &MemoryManager::MemoryPlane::allocated)
+        .def_readwrite("offset", &MemoryManager::MemoryPlane::offset)
+        .def_readwrite("size", &MemoryManager::MemoryPlane::size)
+        .def_readwrite("stride", &MemoryManager::MemoryPlane::stride)
+        .def_readwrite("length", &MemoryManager::MemoryPlane::length)
+        .def_readwrite("count", &MemoryManager::MemoryPlane::count)
+        .def("get_size", &MemoryManager::MemoryPlane::getSize)
+        .def("get_useful_size", &MemoryManager::MemoryPlane::getUsefulSize)
+        .def("get_contiguous_offset", &MemoryManager::MemoryPlane::getContiguousOffset)
+        .def("get_contiguous_size", &MemoryManager::MemoryPlane::getContiguousSize)
+        .def("get_wrapped_offset", &MemoryManager::MemoryPlane::getWrappedOffset)
+        .def("get_wrapped_size", &MemoryManager::MemoryPlane::getWrappedSize)
+        .def("get_final_offset", &MemoryManager::MemoryPlane::getFinalOffset)
+        .def("get_upper_offset", &MemoryManager::MemoryPlane::getUpperOffset)
+        .def("get_limit", &MemoryManager::MemoryPlane::getLimit);
+
+    py::class_<MemoryManager::MaxLifetimeMinSizeFirst>(m, "MaxLifetimeMinSizeFirst")
+        .def(py::init<unsigned int>(), py::arg("max_lifetime"))
+        .def_readonly("max_lifetime", &MemoryManager::MaxLifetimeMinSizeFirst::maxLifetime)
+        .def("__call__", &MemoryManager::MaxLifetimeMinSizeFirst::operator(), py::arg("p0"), py::arg("p1"));
+
+    py::class_<MemoryManager::MaxLifetimeMaxSizeFirst>(m, "MaxLifetimeMaxSizeFirst")
+        .def(py::init<unsigned int>(), py::arg("max_lifetime"))
+        .def_readonly("max_lifetime", &MemoryManager::MaxLifetimeMaxSizeFirst::maxLifetime)
+        .def("__call__", &MemoryManager::MaxLifetimeMaxSizeFirst::operator(), py::arg("p0"), py::arg("p1"));
+
+    py::class_<MemoryManager::MaxHoleMaxLifetimeFirst>(m, "MaxHoleMaxLifetimeFirst")
+        .def(py::init<unsigned int, MemoryManager*>(), py::arg("max_lifetime"), py::arg("inst"))
+        .def_readonly("max_lifetime", &MemoryManager::MaxHoleMaxLifetimeFirst::maxLifetime)
+        .def_readwrite("inst", &MemoryManager::MaxHoleMaxLifetimeFirst::inst)
+        .def("__call__", &MemoryManager::MaxHoleMaxLifetimeFirst::operator(), py::arg("p0"), py::arg("p1"));
+
+    py::class_<MemoryManager, std::shared_ptr<MemoryManager>>(m, "MemoryManager")
+        .def(py::init<>())
+        .def("reserve", (std::shared_ptr<MemoryManager::MemorySpace> (MemoryManager::*)(unsigned int, const std::set<std::shared_ptr<Node>>&)) &MemoryManager::reserve, py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>())
+        .def("expand", &MemoryManager::expand, py::arg("mem_space"), py::arg("required_size"))
+        .def("allocate", (MemoryManager::MemoryPlane (MemoryManager::*)(unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::allocate, py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("allocate", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::allocate, py::arg("node"), py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("is_wrap_around", &MemoryManager::isWrapAround, py::arg("mem_space"), py::arg("offset"), py::arg("size"), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("reallocate", (MemoryManager::MemoryPlane (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_space"), py::arg("offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("reallocate", (MemoryManager::MemoryPlane (MemoryManager::*)(const MemoryManager::MemoryPlane&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("memPlane"), py::arg("extra_offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("reallocate", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>, const std::shared_ptr<Node>&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_space"), py::arg("node"), py::arg("offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("reallocate", (unsigned int (MemoryManager::*)(const MemoryManager::MemoryPlane&, const std::shared_ptr<Node>&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_plane"), py::arg("node"), py::arg("extra_offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("release", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>)) &MemoryManager::release, py::arg("mem_space"))
+        .def("release", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&)) &MemoryManager::release, py::arg("node"))
+        .def("release_dependencies", &MemoryManager::releaseDependencies, py::arg("node"))
+        .def("optimize", &MemoryManager::optimize, py::arg("strategy"))
+        .def("get_offset", &MemoryManager::getOffset, py::arg("node"), py::arg("plane") = 0)
+        .def("get_size", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&, unsigned int) const) &MemoryManager::getSize, py::arg("node"), py::arg("plane"))
+        .def("get_size", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getSize, py::arg("node"))
+        .def("get_peak_usage", &MemoryManager::getPeakUsage)
+        .def("get_max_lifetime", &MemoryManager::getMaxLifetime)
+        .def("get_planes", (const std::vector<MemoryManager::MemoryPlane>& (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getPlanes, py::arg("node"))
+        .def("get_planes", (const MemoryManager::MemMap_T& (MemoryManager::*)() const) &MemoryManager::getPlanes)
+        .def("get_planes", (MemoryManager::MemMap_T (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>) const) &MemoryManager::getPlanes, py::arg("mem_space"))
+        .def("get_nb_planes", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getNbPlanes, py::arg("node"))
+        .def("get_nb_planes", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>) const) &MemoryManager::getNbPlanes, py::arg("mem_space"))
+        .def("get_current_tick", &MemoryManager::getCurrentTick)
+        .def("tick", &MemoryManager::tick)
+        .def("log", &MemoryManager::log, py::arg("file_name"))
+        ;
+}
+
+}   // Aidge
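
A minimal sketch of driving the `MemoryManager` bindings by hand (sizes are arbitrary allocation units; no node dependencies are attached):

```python
import aidge_core

mm = aidge_core.MemoryManager()
space = mm.reserve(1024)        # reserve a memory space at the current clock
mm.tick()                       # advance the logical clock
mm.release(space)               # release the space at the new clock
print(mm.get_peak_usage())      # peak usage over the whole timeline
```
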
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index c0966e54d4f025a607aa9763a3657de5b39d2ff4..b16134da324383a4542965393257288c49dceed0 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -11,6 +11,7 @@
 
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
+#include "aidge/scheduler/MemoryManager.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
 #include "aidge/scheduler/ParallelScheduler.hpp"
@@ -22,16 +23,18 @@ namespace Aidge {
 void init_Scheduler(py::module& m){
     py::class_<Scheduler, std::shared_ptr<Scheduler>>(m, "Scheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
+    .def("graph_view", &Scheduler::graphView)
     .def("save_scheduling_diagram", &Scheduler::saveSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
     .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
+    .def("generate_memory", &Scheduler::generateMemory, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false)
     ;
 
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("data")=std::vector<Tensor>())
-    .def("backward", &SequentialScheduler::backward, py::arg("data"), py::arg("instanciate_grad")=true)
+    .def("backward", &SequentialScheduler::backward, py::arg("instanciate_grad")=true)
     ;
 
     py::class_<ParallelScheduler, std::shared_ptr<ParallelScheduler>, Scheduler>(m, "ParallelScheduler")
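
A sketch of how the newly bound memory-planning entry points could be driven from Python; the graph and the output file name are placeholders:

```python
import aidge_core

def plan_memory(graph_view: aidge_core.GraphView) -> aidge_core.MemoryManager:
    scheduler = aidge_core.SequentialScheduler(graph_view)
    scheduler.generate_scheduling()          # static scheduling is required first
    mem_manager = scheduler.generate_memory(inc_producers=False,
                                            wrap_around_buffer=True)
    print("peak memory usage:", mem_manager.get_peak_usage())
    mem_manager.log("memory_info")           # dump the memory layout to a file
    return mem_manager
```
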
diff --git a/src/filler/Filler.cpp b/src/filler/Filler.cpp
index 34e04c2ba84ad493429bceadd54f4fa27df69bcd..f5839087c2e37c5e0288f08716595a0ed66e869e 100644
--- a/src/filler/Filler.cpp
+++ b/src/filler/Filler.cpp
@@ -20,12 +20,12 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-
 void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor,
                                  std::uint32_t& fanIn, std::uint32_t& fanOut) {
-    AIDGE_ASSERT(
-        tensor->nbDims() == 4,
-        "Tensor need to have 4 dimensions to compute FanIn and FanOut.");
+    AIDGE_ASSERT(tensor->nbDims() == 4 || tensor->nbDims() == 2,
+                 "Tensor needs to have 2 or 4 dimensions to compute FanIn and "
+                 "FanOut, but found a tensor with {} dims.",
+                 tensor->nbDims());
     // Warning: This function suppose NCXX data layout.
     // Aidge currently only support NCHW but this maybe not be true in the
     // future.
@@ -35,6 +35,6 @@ void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor,
                  "Cannot calculate FanIn if tensor batch size is 0.");
     AIDGE_ASSERT(channelSize != 0,
                  "Cannot calculate FanOut if tensor channel size is 0.");
-    fanIn =  static_cast<std::uint32_t>(tensor->size() / batchSize);
+    fanIn = static_cast<std::uint32_t>(tensor->size() / batchSize);
     fanOut = static_cast<std::uint32_t>(tensor->size() / channelSize);
 }
diff --git a/src/filler/HeFiller.cpp b/src/filler/HeFiller.cpp
index 74d681f1a05c15045d27a0fe678aa676d16af077..ff20b76183c03e7ac90b5c225b3da7a8c6ffb2df 100644
--- a/src/filler/HeFiller.cpp
+++ b/src/filler/HeFiller.cpp
@@ -29,7 +29,9 @@ void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor,
               : (varianceNorm == Aidge::VarianceNorm::Average)
                   ? (fanIn + fanOut) / 2.0
                   : fanOut);
-
+    AIDGE_ASSERT(n > 0,
+                 "Invalid fan-in/fan-out: would lead to a division by zero or "
+                 "the square root of a negative value.");
     const T stdDev(std::sqrt(2.0 / n));
 
     const T mean(varianceNorm == Aidge::VarianceNorm::FanIn ? meanNorm / fanIn
diff --git a/src/filler/XavierFiller.cpp b/src/filler/XavierFiller.cpp
index a1de15971ca8063e504e270fa6d2275d93270460..734874d449c83087ca0e93df7eeb620e178ee7ba 100644
--- a/src/filler/XavierFiller.cpp
+++ b/src/filler/XavierFiller.cpp
@@ -29,6 +29,9 @@ void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor,
               : (varianceNorm == Aidge::VarianceNorm::Average)
                   ? (fanIn + fanOut) / 2.0
                   : fanOut);
+    AIDGE_ASSERT(n > 0,
+                 "Invalid fan-in/fan-out: would lead to a division by zero or "
+                 "the square root of a negative value.");
     const T scale(std::sqrt(3.0 / n));
 
     std::uniform_real_distribution<T> uniformDist(-scale, scale);
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index df2177cf6910a3c40ef269d18bf148d60b5faa66..55fe69678d7d6582f13c48a285fb4f7bfa2a1419 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -83,6 +83,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     }
 
     fmt::print(fp.get(),
+                "```mermaid\n"
                 "%%{{init: {{'flowchart': {{ 'curve': 'monotoneY'}}, "
                 "'fontFamily': 'Verdana' }} }}%%\nflowchart TB\n\n");
 
@@ -204,6 +205,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     fmt::print(fp.get(), "classDef producerCls_rootCls stroke:#f00,fill:#ccf\n");
     fmt::print(fp.get(), "classDef genericCls_rootCls stroke:#f00,fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n");
     fmt::print(fp.get(), "classDef metaCls_rootCls stroke:#f00,stroke-width:5px\n");
+    fmt::print(fp.get(), "```\n");
     fmt::print(fp.get(), "\n");
 }
 
@@ -391,7 +393,7 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
     forwardDims(dims);
 }
 
-bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims, bool allowDataDependency) {
+bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
@@ -414,9 +416,10 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                     i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
             } else {
                 // Input is missing
-                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i)
-                    && !std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
+                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
+                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
+                  "Empty input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
             }
 
         }
@@ -907,7 +910,7 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
                                                      newGraph->getOrderedOutputs();
 
     auto inputParents = std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(oldOIn.size());
-    auto outputChildren = std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(oldOOut.size());
+    auto outputChildren = std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>>(oldOOut.size());
 
     // keep in memory every node related to the node to replace :
     // Parent
@@ -918,19 +921,12 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
         // inputParent.first -> addChild(newOI[i].first, inputParent.second, newOI[i].second);
     }
     // Children
-    for (std::size_t i = 0; i < oldOOut.size();) {
+    for (std::size_t i = 0; i < oldOOut.size(); ++i) {
         std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> outputChild =
               oldOOut[i].first -> output(oldOOut[i].second);
-        if (outputChild.empty()) {
-            outputChildren[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>({nullptr, gk_IODefaultIndex});
-            ++i;
-        }
-        else {
-            for (const auto& child : outputChild) {
-                if (oldNodes.find(child.first) == oldNodes.cend()) {
-                    outputChildren[i] = child;
-                    ++i;
-                }
+        for (const auto& child : outputChild) {
+            if (oldNodes.find(child.first) == oldNodes.cend()) {
+                outputChildren[i].push_back(child);
             }
         }
     }
@@ -968,8 +964,8 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
             }
         }
         for (std::size_t o = 0; o < oldOOut.size(); ++o) {
-            if (outputChildren[o].first) {
-                newOOut[o].first -> addChild(outputChildren[o].first, newOOut[o].second, outputChildren[o].second);
+            for (const auto& child : outputChildren[o]) {
+                newOOut[o].first -> addChild(child.first, newOOut[o].second, child.second);
             }
         }
     }
@@ -979,15 +975,21 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
         if (newNodes.size() == 0) {
             // Case 3
             if (oldOIn.size() == oldOOut.size()) {
+                // Same number of inputs and outputs: connect each input to the corresponding output
                 for (std::size_t i = 0; i < oldOIn.size(); ++i) {
                     if (inputParents[i].first) {
-                      inputParents[i].first -> addChild(outputChildren[i].first, inputParents[i].second, outputChildren[i].second);
+                      for (const auto& child : outputChildren[i]) {
+                        inputParents[i].first -> addChild(child.first, inputParents[i].second, child.second);
+                      }
                     }
                 }
             }
             else if ((oldOIn.size() == 1) && (inputParents[0].first)) {
-                for (std::size_t i = 0; i < oldOIn.size(); ++i) {
-                    inputParents[0].first -> addChild(outputChildren[i].first, inputParents[0].second, outputChildren[i].second);
+                // Single input: connect the only input to all the outputs
+                for (std::size_t i = 0; i < oldOOut.size(); ++i) {
+                    for (const auto& child : outputChildren[i]) {
+                        inputParents[0].first -> addChild(child.first, inputParents[0].second, child.second);
+                    }
                 }
             }
         }
@@ -1008,8 +1010,8 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
                 }
             }
             for (std::size_t o = 0; o < oldOOut.size(); ++o) {
-                if (outputChildren[o].first) {
-                    newOOut[o].first -> addChild(outputChildren[o].first, newOOut[o].second, outputChildren[o].second);
+                for (const auto& child : outputChildren[o]) {
+                    newOOut[o].first -> addChild(child.first, newOOut[o].second, child.second);
                 }
             }
         }
@@ -1058,6 +1060,12 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
     return true;
 }
 
+void Aidge::GraphView::updateInputsOutputs() {
+  for (auto node : mNodes) {
+    updateInputsOutputsNew(node);
+  }
+}
+
 void Aidge::GraphView::updateInputsOutputsNew(std::shared_ptr<Node> newNode) {
   // Can be called several times with the same node, e.g. when addChild() is
   // called on a node already part of the GraphView. In this case, inputs/outputs
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 6bafb3b7905ae36e23af32f8d60be33a4ba178bf..9b77ffcbe0117292ed0aa520309febf709e8dd68 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -63,7 +63,8 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
                         *it = dim;
                     }
                     else if ((dim != *it) && (dim != 1)) {
-                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Add operation: {}", outDims);
+                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Add Operation: {} for previous inputs vs {} for input#{}",
+                            outDims, getInput(i)->dims(), i);
                     }
                 }
             }
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 813ab774b11cd72f440d28f61843500686d7df2d..e6300d08c2c792c8a3eb66b307aca53f9d2acc73 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -44,7 +44,8 @@ bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
                 outDims[out_id] = lowDims[low_id];
             }
             else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Div Operation: {}", outDims);
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Div Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
             }
             --out_id;
             --low_id;
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 46e9e1173af98ed5711aa0bbce54705fb61dc03c..36ff1854703d015980a1943390eb87d0863d877f 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -37,6 +37,37 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
     }
 }
 
+void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
+    AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
+    AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
+
+    const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    inputOp.first->getOperator()->associateInput(inputOp.second, data);
+
+    // Associate inputs for custom implementation
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
+}
+
+void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
+    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
+
+    const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    inputOp.first->getOperator()->setInput(inputOp.second, data);
+
+    // Associate inputs for custom implementation
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
+}
+
+void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Data>&& data) {
+    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
+
+    const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    inputOp.first->getOperator()->setInput(inputOp.second, std::forward<std::shared_ptr<Data>>(data));
+
+    // Associate inputs for custom implementation
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
+}
+
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
         return mImpl->getNbRequiredData(inputIdx);
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 5a25e4dd447f44220dbe4124e63f567520ad8d1e..426de388f31391fb5e59446d50e50de94ca5f8a1 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -45,7 +45,8 @@ bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
                 outDims[out_id] = lowDims[low_id];
             }
             else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Mul Operation: {}", outDims);
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Mul Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
             }
             --out_id;
             --low_id;
@@ -53,9 +54,6 @@ bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
         mOutputs[0]->resize(outDims);
         return true;
     }
-    else if (!getInput(0)->empty() && !getInput(1)->empty()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims());
-    }
 
     return false;
 }
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 42715516e6804c1a48ef848fbda8f9d596f0e69e..135c792345b0caf1166e671a8dad7d5b49b42ee7 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -44,7 +44,8 @@ bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
                 outDims[out_id] = lowDims[low_id];
             }
             else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Pow Operation: {}", outDims);
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Pow Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
             }
             --out_id;
             --low_id;
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index f384c10138500f454720395e7387c331d67440b6..7059ea7e989d789b4cff0ed895fc2c5ec0ad81bc 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -72,9 +72,6 @@ void Aidge::Producer_Op::forward() {
     if (!backend().empty()) {
         mImpl->forward();
     }
-    else {
-        fmt::print("Basic Producer forward() function.\n");
-    }
 
     runHooks();
 }
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 50e556ad97a90b7a9868594cebe350d955983fd7..b977f4ee7ccce32d7f7929cbee99140aea36cd2f 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -46,7 +46,8 @@ bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
                 outDims[out_id] = lowDims[low_id];
             }
             else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Sub Operation: {}", outDims);
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Sub Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
             }
             --out_id;
             --low_id;
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index 3b42db7fe18d2269b95cf35fd92851d1e3684bad..b0c99bffb895dc64b20d76991911ae5f4b604c85 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -51,7 +51,7 @@ void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) {
         AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type());
         const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator());
         for (std::size_t o = 0; o < node -> nbOutputs(); ++o) {
-            op->getOutput(o)->initGradient();
+            op->getOutput(o)->initGrad();
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp
index 1dd13fe2100122002d4ed068ada4851b1bfba463..4e515099006b9e0588eafc7e981c5f5e80bbe97d 100644
--- a/src/scheduler/ParallelScheduler.cpp
+++ b/src/scheduler/ParallelScheduler.cpp
@@ -28,7 +28,7 @@
 #include "aidge/operator/Memorize.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 
-void Aidge::ParallelScheduler::forward(bool forwardDims, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
+void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::shared_ptr<Aidge::Tensor>>& data) {
     // Collect all data input of the graph (that are producers)
     if (!data.empty()){
         connectInputs(data);
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 4e3f9978837120bd01a3de2cfe2d22e33f9d7828..af10e3dcd3ead044f8619c40570936f53039d9a2 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -195,7 +195,9 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
             // be put back in the consumers list once the remaining consumers
             // have been exhausted.
             bool isStillConsumer = false;
-            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
+            // Only look at data inputs: if no data is available on a data input,
+            // then, by definition, no parameter can be consumed on the parameter inputs.
+            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbData(); ++inputIdx) {
                 AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
 
                 if (consumer->getOperator()->getNbConsumedData(inputIdx) <
@@ -280,7 +282,12 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
     mPriorCache.clear();
 
     if (!consumers.empty()) {
-        Log::warn("Remaining consumers: possible dead-lock");
+        std::vector<std::string> consumersName;
+        std::transform(consumers.begin(), consumers.end(),
+            std::back_inserter(consumersName),
+            [&namePtrTable](auto val){ return namePtrTable.at(val); });
+
+        Log::warn("Remaining consumers: {}. Possible dead-lock.", consumersName);
     }
 
     return schedule;
@@ -491,17 +498,17 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wr
                 const MemoryManager::MemoryPlane& memPlane
                     = (wrapAroundBuffer && wrapAroundSize > 0)
                         ? (*wrapAroundMemPlane[outputIdx]) :
-                            memManager.allocate(requiredSize.data, childs, stride, length, count);
+                            memManager.allocate(size, childs, stride, length, count);
 
                 if (wrapAroundBuffer && wrapAroundSize > 0) {
                     memManager.reallocate(memPlane,
                         node, 0,
-                        requiredSize.data, true, wrapAroundExtra, childs, stride, length, count);
+                        size, true, wrapAroundExtra, childs, stride, length, count);
                 }
                 else {
                     memManager.reallocate(memPlane.memSpace,
                         node, memPlane.offset,
-                        requiredSize.data, false, 0, childs, stride, length, count);
+                        size, false, 0, childs, stride, length, count);
                 }
             }
 
@@ -513,12 +520,23 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wr
     return memManager;
 }
 
-void Aidge::Scheduler::connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data){
+void Aidge::Scheduler::connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data){
     // This version of connect inputs only connects tensor inputs in input data producers.
     auto inputNodes = mGraphView->getOrderedInputs();
 
     // Assert that the number of input data producers corresponds to the number of data input
-    assert(data.size() == inputNodes.size()  && "Scheduler connectInput error - Inconsistent number of graph inputs and inputs passed to the graph");
+    if (data.size() != inputNodes.size()) {
+        const std::map<std::shared_ptr<Node>, std::string> namePtrTable
+            = mGraphView->getRankedNodesName("{0} ({1}#{3})");
+
+        std::vector<std::pair<std::string, IOIndex_t>> inputNodesName;
+        std::transform(inputNodes.begin(), inputNodes.end(),
+            std::back_inserter(inputNodesName),
+            [&namePtrTable](auto val){ return std::make_pair(namePtrTable.at(val.first), val.second); });
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Provided {} inputs to the scheduler, but graph has {} inputs (required inputs in order: {})",
+            data.size(), inputNodes.size(), inputNodesName);
+    }
 
     for (std::size_t i = 0; i < data.size(); ++i){
         // TODO : maybe shallow copy instead of deepcopy
diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp
index 801f46ffb0293696dad8a84908bdda2bbd789bfc..74b1b3f0c6e9be164792460669821744661c15b3 100644
--- a/src/scheduler/SequentialScheduler.cpp
+++ b/src/scheduler/SequentialScheduler.cpp
@@ -28,7 +28,7 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 
-void Aidge::SequentialScheduler::forward(bool forwardDims, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
+void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std::shared_ptr<Aidge::Tensor>>& data) {
     // Collect all data input of the graph (that are producers)
     if (!data.empty()){
         connectInputs(data);
@@ -73,21 +73,12 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, std::vector<std::shar
     }
 }
 
-void Aidge::SequentialScheduler::backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instanciateGrad) {
+void Aidge::SequentialScheduler::backward(bool instanciateGrad) {
     // create ad set Grad values
     if (instanciateGrad) { compile_gradient(mGraphView); }
 
-    const auto& ordered_outputs = mGraphView->getOrderedOutputs();
-    AIDGE_ASSERT(ordered_outputs.size() == data.size(), "You must provide the \
-                   right number of data objects to run the backward function. \
-                   {} outputs detected for the current GraphView when {} were \
-                   provided.", ordered_outputs.size(), data.size());
-    for (std::size_t i = 0; i < ordered_outputs.size(); ++i) {
-        const std::shared_ptr<OperatorTensor> op_ = std::dynamic_pointer_cast<OperatorTensor>(ordered_outputs[i].first->getOperator());
-        const std::shared_ptr<Tensor> t_grad = op_->getOutput(ordered_outputs[i].second)->grad();
-        AIDGE_ASSERT(data[i]->dims() == t_grad->dims(), "Wrong gradient size.");
-        *t_grad = data[i]->clone();
-    }
+    // TODO: check that the output gradients are not empty
+
     // Generate scheduling *only if empty*
     // If scheduling was already generated (in one or several steps, i.e. one or
     // several successive call to generateScheduling()), do not generate it twice