diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index c65dcc6cfc4be8825d1213854014718fb7170854..4b5c448355a17fd4274ba45f5cd98afa70b1ae53 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -8,4 +8,5 @@ http://www.eclipse.org/legal/epl-2.0.
 SPDX-License-Identifier: EPL-2.0
 """
 from aidge_core.aidge_core import * # import so generated by PyBind
-from aidge_core.export import ExportNode
+from aidge_core.export import ExportNode, generate_file, generate_str
+import aidge_core.utils
diff --git a/aidge_core/export/__init__.py b/aidge_core/export/__init__.py
index 00b44121d68af06171525fdf953bf50e53328421..6fc846d93301f45b0635cd9b2fabae65fa7be8ab 100644
--- a/aidge_core/export/__init__.py
+++ b/aidge_core/export/__init__.py
@@ -1 +1,2 @@
 from .node_export import *
+from .code_generation import *
diff --git a/aidge_core/export/code_generation.py b/aidge_core/export/code_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..b18b5476f8e083bcbe3d4f6c4a57132ebe7b780f
--- /dev/null
+++ b/aidge_core/export/code_generation.py
@@ -0,0 +1,47 @@
+import os
+from jinja2 import Environment, FileSystemLoader
+
+
+def generate_file(file_path: str, template_path: str, **kwargs) -> None:
+    """Generate a file at `file_path` using the jinja template located at `file_path`.
+
+    kwargs are used to fill the template.
+
+    :param file_path: Path of the file to generate
+    :type file_path: str
+    :param template_path: Path to the template to use for code generation
+    :type template_path: str
+    """
+    # Get directory name of the file
+    dirname = os.path.dirname(file_path)
+
+    # If directory doesn't exist, create it
+    if not os.path.exists(dirname):
+        os.makedirs(dirname)
+
+    # Get directory name and name of the template
+    template_dir = os.path.dirname(template_path)
+    template_name = os.path.basename(template_path)
+
+    # Select template
+    template = Environment(loader=FileSystemLoader(
+        template_dir)).get_template(template_name)
+
+    # Generate file
+    content = template.render(kwargs)
+    with open(file_path, mode="w", encoding="utf-8") as message:
+        message.write(content)
+
+def generate_str(template_path: str, **kwargs) -> str:
+    """Generate a string using the jinja template located at `file_path`.
+    kwargs are used to fill the template.
+
+    :param template_path: Path to the template to use for code generation
+    :type template_path: str
+    :return: A string of the interpreted template
+    :rtype: str
+    """
+    dirname = os.path.dirname(template_path)
+    filename = os.path.basename(template_path)
+    template = Environment(loader=FileSystemLoader(dirname)).get_template(filename)
+    return template.render(kwargs)
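Usage sketch (not part of the patch): a minimal example of the two new helpers, assuming an installed `aidge_core`; the template file, its content and the output paths below are purely illustrative.

```python
import os
from aidge_core.export import generate_file, generate_str

# Write a tiny jinja template to disk (normally such templates ship with an export module).
os.makedirs("templates", exist_ok=True)
with open("templates/layer.hpp.jinja", "w", encoding="utf-8") as f:
    f.write("#define {{ name|upper }}_NB_OUTPUTS {{ nb_outputs }}\n")

# Render the template to a string...
print(generate_str("templates/layer.hpp.jinja", name="conv1", nb_outputs=8))

# ...or straight to a file; missing parent directories are created by generate_file().
generate_file("export/include/conv1.hpp", "templates/layer.hpp.jinja",
              name="conv1", nb_outputs=8)
```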
diff --git a/aidge_core/export/node_export.py b/aidge_core/export/node_export.py
index 477989b037da6f6229bd275ff22974d9ef307848..7262e9a837424158b8896f305894dcc57769520c 100644
--- a/aidge_core/export/node_export.py
+++ b/aidge_core/export/node_export.py
@@ -39,7 +39,6 @@ class ExportNode(ABC):
             if parent_node is not None:
                 self.inputs_dims.append(self.operator.get_input(idx).dims())
             else:
-                print(self.operator.get_input(idx))
                 if self.operator.get_input(idx) is not None:
                     self.inputs_dims.append(self.operator.get_input(idx).dims())
                 else:
diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 4aacfafd7d51830dc89b7b30ea5ebf521a13fe30..6e0c1f9b9a0828e266ef3bf19ee75df3e275b282 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -39,7 +39,7 @@ class test_OperatorImpl(unittest.TestCase):
         global GLOBAL_CPT
         matmul = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0")
         generic_matmul_op = matmul.get_operator()
-        generic_matmul_op.set_compute_output_dims(lambda x: x)
+        generic_matmul_op.set_forward_dims(lambda x: x)
         generic_matmul_op.set_impl(testImpl(generic_matmul_op))
         generic_matmul_op.forward()
         self.assertEqual(GLOBAL_CPT, 1)
@@ -52,6 +52,7 @@ class test_OperatorImpl(unittest.TestCase):
         self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         conv.get_operator().set_backend("cpu")
+        conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
         conv.get_operator().forward()
         self.assertEqual(GLOBAL_CPT, 1)
 
@@ -65,6 +66,7 @@ class test_OperatorImpl(unittest.TestCase):
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         model = aidge_core.sequential([conv])
         model.set_backend("cpu")
+        conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
         conv.get_operator().forward()
         self.assertEqual(GLOBAL_CPT, 1)
 
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index c94960733b24444218b1209463adbda11b89f6e8..164aee726255e0478b629ee853d9a1f619945f3a 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -92,14 +92,14 @@ class test_operator_binding(unittest.TestCase):
         attrs.set_attr("d", 23.89)
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
-    def test_compute_output_dims(self):
+    def test_forward_dims(self):
         in_dims=[25, 25]
         input = aidge_core.Producer(in_dims, name="In")
         genOp = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp")
         _ = aidge_core.sequential([input, genOp])
         self.assertListEqual(genOp.get_operator().get_output(0).dims(), [])
-        genOp.get_operator().set_compute_output_dims(lambda x:x)
-        genOp.get_operator().compute_output_dims()
+        genOp.get_operator().set_forward_dims(lambda x:x)
+        genOp.get_operator().forward_dims()
         self.assertListEqual(genOp.get_operator().get_output(0).dims(), in_dims)
 
     def test_set_impl(self):
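For reference, a minimal sketch of the renamed GenericOperator hooks outside the test harness, using the same pybind API exercised above; the operator name and shape lambda are illustrative only.

```python
import aidge_core

# Producer feeding a generic operator whose output dims are computed by a user callback.
prod = aidge_core.Producer([1, 8, 32, 32], name="in")
gen = aidge_core.GenericOperator("Flatten", 1, 0, 1, name="flatten0")
_ = aidge_core.sequential([prod, gen])

# set_forward_dims() replaces set_compute_output_dims(); the callback receives the list
# of input dims and returns one dims list per output.
gen.get_operator().set_forward_dims(
    lambda dims: [[dims[0][0], dims[0][1] * dims[0][2] * dims[0][3]]])
gen.get_operator().forward_dims()                 # formerly compute_output_dims()
print(gen.get_operator().get_output(0).dims())    # expected: [1, 8192]
```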
diff --git a/aidge_core/utils.py b/aidge_core/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d82d524b7e886ed396507376a5934a748a89e44c
--- /dev/null
+++ b/aidge_core/utils.py
@@ -0,0 +1,16 @@
+def template_docstring(template_keyword, text_to_replace):
+    """Method to template docstring
+
+    :param template: Template keyword to replace, in the documentation you template word must be between `{` `}`
+    :type template: str
+    :param text_to_replace: Text to replace your template with.
+    :type text_to_replace: str
+    """
+    def dec(func):
+        if "{"+template_keyword+"}" not in func.__doc__:
+            raise RuntimeError(
+                f"The function {function.__name__} docstring does not contain the template keyword: {template_keyword}.")
+        func.__doc__ = func.__doc__.replace(
+            "{"+template_keyword+"}", text_to_replace)
+        return func
+    return dec
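A minimal sketch of the new decorator in use, assuming an installed `aidge_core`; the decorated function and keyword are made up for illustration.

```python
from aidge_core.utils import template_docstring

@template_docstring("operator_list", "Conv, FC, ReLU")
def export_nodes(graph):
    """Export the nodes of `graph`.

    Supported operators: {operator_list}.
    """
    ...

# The placeholder has been substituted at decoration time:
print(export_nodes.__doc__)
```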
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 6a9056723df133fef62e56f969d39d8f69390a76..1fc9168da120ba87c916b1a6a346997be69184b4 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -23,7 +23,7 @@ class Operator;
 
 class OperatorImpl {
 public:
-    OperatorImpl(const Operator& op, const std::string& backend);
+    OperatorImpl(const Operator& op, const std::string& backend = "");
     virtual void forward();
     virtual void backward();
 
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 922acacb070c745b2924d1fb787602326ec9d05a..7cd8c67262221fbf9c1b2415ebf98db56274cce5 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -23,6 +23,8 @@ namespace Aidge {
 
 template <class T>
 class TensorImpl_cpu : public TensorImpl {
+    static_assert(std::is_trivially_copyable<T>::value, "TensorImpl type should be trivially copyable");
+
 private:
     /// Pointer to the data and its capacity
     future_std::span<T> mData;
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index b8623450a9c793e4efaff00d87455ab88aa60207..3dbf54a5fa58be40b08f58d760f3991586203825 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -251,7 +251,6 @@ class Tensor : public Data,
         auto add_ = Add_Op(2);
         add_.associateInput(0, std::make_shared<Tensor>(*this));
         add_.associateInput(1, std::make_shared<Tensor>(other));
-        add_.computeOutputDims();
         add_.setDataType(dataType());
         add_.setBackend(mImpl->backend());
         add_.forward();
@@ -275,7 +274,6 @@ class Tensor : public Data,
         auto sub_ = Sub_Op();
         sub_.associateInput(0, std::make_shared<Tensor>(*this));
         sub_.associateInput(1, std::make_shared<Tensor>(other));
-        sub_.computeOutputDims();
         sub_.setDataType(dataType());
         sub_.setBackend(mImpl->backend());
         sub_.forward();
@@ -299,7 +297,6 @@ class Tensor : public Data,
         auto mul_ = Mul_Op();
         mul_.associateInput(0, std::make_shared<Tensor>(*this));
         mul_.associateInput(1, std::make_shared<Tensor>(other));
-        mul_.computeOutputDims();
         mul_.setDataType(dataType());
         mul_.setBackend(mImpl->backend());
         mul_.forward();
@@ -323,7 +320,6 @@ class Tensor : public Data,
         auto div_ = Div_Op();
         div_.associateInput(0, std::make_shared<Tensor>(*this));
         div_.associateInput(1, std::make_shared<Tensor>(other));
-        div_.computeOutputDims();
         div_.setDataType(dataType());
         div_.setBackend(mImpl->backend());
         div_.forward();
@@ -529,6 +525,7 @@ public:
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
         AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
+        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
         AIDGE_ASSERT(idx < mSize, "idx out of range");
         return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
     }
@@ -541,6 +538,7 @@ public:
     template <typename expectedType>
     void set(std::size_t idx, expectedType value){
         AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
+        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "set() can only be used for backends providing a valid host pointer");
         AIDGE_ASSERT(idx < mSize, "idx out of range");
         expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(mImplOffset + idx));
         *dataPtr = value;
@@ -556,16 +554,11 @@ public:
     inline void print() const { fmt::print("{}\n", toString()); }
 
     std::shared_ptr<Tensor> grad() {
-        // if (!mGrad && mImpl) {
-        //     mGrad = std::make_shared<Tensor>(mDims);
-        //     mGrad->setDataType(mDataType);
-        //     mGrad->setBackend(mImpl->backend());
-
-        //     // if (mImpl) mGrad->setBackend(mImpl->backend());
-        // }
-
         return mGrad;
     }
+    void setGrad(std::shared_ptr<Tensor> newGrad) {
+        mGrad = newGrad;
+    }
 
     /**
      * @brief Associate the gradient with a Tensor instance and set its implementation
@@ -576,7 +569,7 @@ public:
      * @note If Tensor instance and implementation already existed for the gradient
      * nothing is done.
      */
-    void initGradient() {
+    void initGrad() {
         if (!mGrad) {
             mGrad = std::make_shared<Tensor>(mDims);
         }
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 845599fd32f9d2557784241d3d39747768638efa..c9a4c11d780a41a1620518047d66a7de2d7b55fa 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -160,7 +160,7 @@ public:
 
     /**
      * @brief List outside input connections of the GraphView. The vector
-     * size is garanteed to match the number of outside inputs of the GraphView. If there is
+     * size is guaranteed to match the number of outside inputs of the GraphView. If there is
      * no external connection to a given input, a pair of nullptr and gk_IODefaultIndex is returned.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
@@ -210,7 +210,7 @@ public:
      * @brief Compute dimensions of input/output Tensors for each Operator of the
      * GraphView object's Nodes.
      */
-    void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
+    bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
@@ -376,6 +376,12 @@ public:
         addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
     }
 
+    inline void updateNodeName(const std::string& oldName, const std::string& newName){
+        AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted!", oldName, name());
+        mNodeRegistry[newName] = mNodeRegistry[oldName];
+        mNodeRegistry.erase(oldName);
+    }
+
     /**
      * @brief Include a GraphView content in the current GraphView and link
      * the two sets by linking one Node from each GraphView.
@@ -480,6 +486,14 @@ public:
      */
     IOIndex_t getNbFreeDataInputs() const;
 
+    /**
+     * @brief Force update of GraphView inputs/outputs.
+     * It may be necessary to force the update of GraphView inputs/outputs when
+     * connections are added or removed inside the GraphView **after** the nodes
+     * were added.
+     */
+    void updateInputsOutputs();
+
 private:
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 908f56295887bd2fbed3350a026045a4ab6b21d9..2a0a4a3b703670c8ace05e03fc5c797fe861a423 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -235,8 +235,8 @@ public:
   ///////////////////////////////////////////////////////
 
   /**
-   * @brief Vector of pointers to each GraphView containing the object
-   * @return std::vector<GraphView>
+   * @brief Set of pointers to each GraphView containing this Node
+   * @return std::set<GraphView>
    */
   inline std::set<std::shared_ptr<GraphView>> views() const noexcept {
     std::set<std::shared_ptr<GraphView>> res;
@@ -460,10 +460,10 @@ private:
   // OPERATOR FUNCTIONNAL but commented out to avoid iostream inclusion
   // /**
   //  * @brief operator<< overload to ease print & debug of nodes
-  //  * @param[inout] ostream to print to 
+  //  * @param[inout] ostream to print to
   //  * @param[in] n node to print
   //  */
-  // friend std::ostream& operator << (std::ostream& os, Node& n); 
+  // friend std::ostream& operator << (std::ostream& os, Node& n);
 };
 
 } // namespace Aidge
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 93cfb44514e39a489ccb75d86fd6e114da5c6162..4ac14bdaecd16e90586d14699f3b6f1bd6d88cab 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -60,7 +60,7 @@ public:
     // }
 
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 4a8ca19a58427f207f9a4cae0dc9d0c29b54d7e7..af2993d67f16df498f13a0489a3837a8f9fc4a75 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -65,7 +65,7 @@ public:
     }
 
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
 
     std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 64ae368f377d264378036e62175dc10b17aff0f4..aa53f8c43f0be2a0e094946d66fd263bc19e39f5 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -68,7 +68,7 @@ public:
     // }
 
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index bbc776a1175a1fc29d08c3872649a6b7aac2f04f..6efbc0a214dde3ca969226f734b5ee903fe5ab50 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -24,13 +24,20 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Cast_OpImpl : public OperatorImpl {
+public:
+    Cast_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 class Cast_Op : public OperatorTensor,
     public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
 public:
     static const std::string Type;
 
-    Cast_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Cast_Op() : OperatorTensor(Type, 1, 0, 1) {
+        mImpl = std::make_shared<Cast_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -39,10 +46,11 @@ public:
     Cast_Op(const Cast_Op& op)
         : OperatorTensor(op)
     {
-        if (op.mImpl) {
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Cast_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Cast_OpImpl>(*this);
         }
     }
 
@@ -56,8 +64,6 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
-    void forward() override;
-
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 611ff6bd53b1f16f87f73dd951d0645b9765262e..a9a4c9253f3af9f9cd82390256ec70d066017cc5 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -26,6 +26,12 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Concat_OpImpl : public OperatorImpl {
+public:
+    Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
@@ -45,6 +51,7 @@ public:
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
         }
+        mImpl = std::make_shared<Concat_OpImpl>(*this);
     }
 
     /**
@@ -55,10 +62,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Concat_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Concat_OpImpl>(*this);
         }
     }
 
@@ -70,7 +78,7 @@ public:
         return std::make_shared<Concat_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c93a098106be76f30c1150ea64c464492429feb9..d6a0df5ab472c4a728e5b5042258d6d2bd34f871 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -108,7 +108,7 @@ public:
 
     // }
 
-    void computeOutputDims() override final {
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
         // check inputs have been associated
         bool associated = true;
         for (IOIndex_t i = 0; i < 3; ++i) {
@@ -118,6 +118,17 @@ public:
             associated &= !(getInput(i)->empty());
         }
         if (associated) {
+            AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                     (getInput(0)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()),
+                     "Wrong input size for Conv operator.");
+            AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)) &&
+                        (getInput(1)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()) &&
+                        (getInput(1)->template dims<DIM+2>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
+                        "Wrong weight size for Conv operator.");
+            if(!this->template getAttr<ConvAttr::NoBias>())
+                AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                        (getInput(2)->template dims<1>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
+                        "Wrong bias size for Conv operator.");
             std::array<DimSize_t, DIM + 2> outputDims{};
             const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
@@ -135,6 +146,8 @@ public:
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
         }
+
+        return associated;
     }
 
     std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
@@ -147,7 +160,7 @@ public:
         if (firstEltDims.size() != outputDims.size()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
         }
-        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
             // Offset
             auto inputIdxDims = firstEltDims; // batch idx is the same
             inputIdxDims[1] = 0; // each channel is used so start with the first one
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 559c0fc7a97a3a882f6720a91d02dee1af70abd8..2337ff66f00b932a190d5b1735d53df3da8ffdbf 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -90,7 +90,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
         // check inputs have been associated
         // TODO : add a check of inputs dimensions ?
         bool associated = true;
@@ -124,6 +124,8 @@ public:
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
         }
+
+        return associated;
     }
 
     std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
@@ -133,7 +135,7 @@ public:
         if (firstEltDims.size() != outputDims.size()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
         }
-        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
             // Offset
             auto inputIdxDims = firstEltDims; // batch idx is the same
 
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 49410db044518dc3ca2cc33285d570197d83b10a..566f4a6ae69b090b3a035b034406d463eeb77317 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -54,7 +54,7 @@ public:
         return std::make_shared<Div_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 222f0ec1235a946865d1b06948bf8b72c5be5a48..b97874f4e0deafd685453b3ce9865e65fafe7561 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -71,7 +71,7 @@ public:
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index b7d18e6443404730bbcb73cf7e6da97b8b3e6a7c..7534b66951cc9d8074d0af7742ba5165013431f5 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -25,6 +25,12 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Gather_OpImpl : public OperatorImpl {
+public:
+    Gather_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
 enum class GatherAttr { Indices, GatheredShape, Axis };
 
 class Gather_Op : public OperatorTensor,
@@ -46,7 +52,9 @@ public:
                 attr<GatherAttr::Indices>(indices),
                 attr<GatherAttr::GatheredShape>(gatheredShape),
                 attr<GatherAttr::Axis>(axis))
-    {}
+    {
+        mImpl = std::make_shared<Gather_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -56,10 +64,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Gather_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Gather_OpImpl>(*this);
         }
     }
 
@@ -71,7 +80,7 @@ public:
         return std::make_shared<Gather_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index e7d60285b4d45826f1d73635d54f4532b4fb1598..f0b7e92d708dfef65eea0ec7649ccc8716533679 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -31,13 +31,13 @@ class GenericOperator_Op
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
-    ComputeDimsFunc mComputeOutputDims;
+    ComputeDimsFunc mForwardDims;
 
 public:
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
         : OperatorTensor(type, nbData, nbParam, nbOut)
     {
-        mImpl = std::make_shared<OperatorImpl>(*this, "");
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
     /**
@@ -61,18 +61,18 @@ public:
     }
 
 public:
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
-    bool outputDimsForwarded() const override final;
+    bool dimsForwarded() const override final;
 
     void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); }
     void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); }
 
-    // Helper functions that can be used with setComputeOutputDims():
+    // Helper functions that can be used with setForwardDims():
     static const ComputeDimsFunc Identity;
     static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs);
-    inline void setComputeOutputDims(ComputeDimsFunc func) {
-        mComputeOutputDims = func;
+    inline void setForwardDims(ComputeDimsFunc func) {
+        mForwardDims = func;
     }
 };
 
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 12c8eb02d9488edeb760b6a063cfac5f8257db18..74529a0ba9481bf6280df8d3ce496f67635a5aef 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -52,7 +52,7 @@ public:
     return std::make_shared<GlobalAveragePooling_Op>(*this);
   }
 
-  void computeOutputDims() override final;
+  bool forwardDims(bool allowDataDependency = false) override final;
 
   void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 27432bc5bb251003e9e93261593e12c2fa704f3d..367aa4e2d68fb1095b1e3b3be76f6ab59439e47f 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -42,7 +42,7 @@ public:
     Identity_Op()
         : OperatorTensor(Type, 1, 0, 1)
     {
-        mImpl = std::make_shared<OperatorImpl>(*this, "");
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
     /**
@@ -63,7 +63,7 @@ public:
         return std::make_shared<Identity_Op>(*this);
     }
 
-    void computeOutputDims() override final {} // Do nothing
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
 
     /**
      * @brief Check if output dimensions have been computed.
@@ -73,34 +73,15 @@ public:
      * @return true Input has dimensions.
      * @return false Input has no dimensions or is a nullptr.
      */
-    bool outputDimsForwarded() const override final {
+    bool dimsForwarded() const override final {
         return mInputs[0] ? !mInputs[0]->empty() : false;
     }
 
 
-    void forward() override final { runHooks(); }
+    void forward() override final;
 
     void backward() override final { }
 
-    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override final {
-        AIDGE_ASSERT(data->type() == "Tensor", "{} Operator only accepts Tensors as outputs", type());
-        AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs());
-        *mInputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override final {
-        AIDGE_ASSERT(data->type() == "Tensor", "{} Operator only accepts Tensors as inputs", type());
-        AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs());
-        *mInputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
-    }
-
-    const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const override final {
-        AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs());
-        if (mInputs[outputIdx] == nullptr){
-            return mOutputs[outputIdx]; // Input is not initialized with empty tensor
-        }
-        return mInputs[outputIdx]; // Identity, so Output is Input
-    }
     void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
         // setBackend do nothing, Identity node has no backend it just pass the same Tensor
     }
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 43bd8b1654206df15cd869cf2d37a216fcc4a733..580d720e617e5b20c0acc7ce5e7f200fe5b25606 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -64,7 +64,7 @@ public:
      * @note - Second input is 1-D: it is promoted to a matrix by appending a 1 to its
      * dimensions (D) -> (D,1). The appended 1 is removed after computation.
      */
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 5b09aa02cd0665172a9ae69549d8d9311e10d024..8aff1582604a9e23e248e7c01521567483c793ad 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -84,7 +84,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
         if (!getInput(0)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
         }
@@ -108,7 +108,9 @@ public:
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
+            return true;
         }
+        return false;
     }
 
 
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 7de34563adcaabd63ab036232d4d7b6539fd11eb..6b0ace2eb09fde069f8b9b104f92fc33811c25aa 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -25,6 +25,15 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Memorize_OpImpl : public OperatorImpl {
+public:
+    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    void updateConsummerProducer() override;
+    void forward() override;
+};
+
 enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
@@ -73,8 +82,8 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
-    void computeOutputDims() override;
-    bool outputDimsForwarded() const override;
+    bool forwardDims(bool allowDataDependency = false) override final;
+    bool dimsForwarded() const override;
     void updateConsummerProducer() override;
     void forward() override;
 
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 5ac9cf3c92b1951407e4c1892b1a8dc70a724013..a411101618a5f4acaf070516d67691a6b55e3ff5 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -70,18 +70,11 @@ public:
         return mScheduler;
     }
 
-    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final {
-        AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
-        AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+    void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
 
-        const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
-        inputOp.first->getOperator()->associateInput(inputOp.second, data);
-
-        // Associate inputs for custom implementation
-        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void computeOutputDims() override final {
+    bool forwardDims(bool allowDataDependency = false) override final {
         // Check first that all required inputs are available, otherwise
         // mGraph->forwardDims() will fail!
         bool forwarded = true;
@@ -91,8 +84,9 @@ public:
 
         if (forwarded) {
             // Forward dims of micro-graph
-            mGraph->forwardDims();
+            return mGraph->forwardDims({}, allowDataDependency);
         }
+        return false;
     }
 
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index 3652cf9697c6bcfea4befe4cdcdf5b9efff8b70c..e9bcaa871619828a50dcd407d39744e7983fe2c4 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -24,13 +24,20 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Move_OpImpl : public OperatorImpl {
+public:
+    Move_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 class Move_Op : public OperatorTensor,
     public Registrable<Move_Op, std::tuple<std::string, std::string>, std::unique_ptr<OperatorImpl>(const Move_Op&)> {
 public:
     static const std::string Type;
 
-    Move_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Move_Op() : OperatorTensor(Type, 1, 0, 1) {
+        mImpl = std::make_shared<Move_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -39,7 +46,12 @@ public:
     Move_Op(const Move_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Move_Op>::create({mInputs[0]->getImpl()->backend(), mOutputs[0]->getImpl()->backend()})(*this) : nullptr;
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
+        }
+        else {
+            mImpl = std::make_shared<Move_OpImpl>(*this);
+        }
     }
 
     /**
@@ -50,14 +62,7 @@ public:
         return std::make_shared<Move_Op>(*this);
     }
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        if (mInputs[0]->getImpl() && Registrar<Move_Op>::exists({mInputs[0]->getImpl()->backend(), name})) {
-            mImpl = Registrar<Move_Op>::create({mInputs[0]->getImpl()->backend(), name})(*this);
-        }
-        mOutputs[0]->setBackend(name, device);
-    }
-
-    void forward() override;
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index cc9fba59431356a132330e453288f2f6e7141178..f53a38a82a6771e416435222137e72366f5f69f3 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -57,7 +57,7 @@ public:
         return std::make_shared<Mul_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index adf45c2d8311112fa145097ee98f46d120bd41ff..a493793278d42904d8a62e31571720f94ff1655d 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -56,8 +56,8 @@ public:
     ///////////////////////////////////////////////////
     // Tensor access
     // input management
-    void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
-    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
+    void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override;
+    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override;
     const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
 
@@ -80,11 +80,13 @@ public:
      * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
      */
     virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
-    virtual void computeOutputDims();
-    virtual bool outputDimsForwarded() const;
+    virtual bool forwardDims(bool allowDataDependency = false);
+    virtual bool dimsForwarded() const;
     ///////////////////////////////////////////////////
 
     virtual void setDataType(const DataType& dataType) const override;
+
+    virtual void forward() override;
 };
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index dce2a6e9e5ea9e0c5fe9a841c587c1f7bbe36fc7..a4e4ebdce801971de118ca8a263999046a13777d 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -74,7 +74,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
         bool associated = true;
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
             if (!getInput(i)) {
@@ -95,6 +95,8 @@ public:
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
         }
+
+        return associated;
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 9109ccaeb8bc648fe74510216fad93299740b9bf..2219f30ec9db7acf55491882a78e7a1ed2931cf0 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -24,6 +24,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Pop_OpImpl : public OperatorImpl {
+public:
+    Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+    void forward() override;
+};
+
 enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
@@ -39,7 +46,9 @@ public:
     Pop_Op()
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<PopAttr::ForwardStep>(0))
-    {}
+    {
+        mImpl = std::make_shared<Pop_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -49,10 +58,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Pop_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Pop_OpImpl>(*this);
         }
     }
 
@@ -66,7 +76,7 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
     void updateConsummerProducer() override;
     void forward() override;
 
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index f2becdc60ceb44c19e341496f71e09f061cea55f..08c4de2a254dd267eda4040b54108f93a0c2d922 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -53,7 +53,7 @@ public:
         return std::make_shared<Pow_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1e5a3940ba22c659121e76e1855353168d68441a..23825079673129ea08aa7da40b21a8cc921d6ba0 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -47,7 +47,7 @@ public:
           Attributes_(attr<ProdAttr::Constant>(constant))
     {
         mOutputs[0]->resize(dims);
-        mImpl = std::make_shared<OperatorImpl>(*this, "");
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
     /**
@@ -86,9 +86,9 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
-    void computeOutputDims() noexcept override final {}
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
-    inline bool outputDimsForwarded() const noexcept override final { return true; }
+    inline bool dimsForwarded() const noexcept override final { return true; }
 
 
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
@@ -102,11 +102,10 @@ public:
         return {"data_output"};
     }
 
-    void forward() override final {
-        fmt::print("Basic Producer forward() function.\n");
-    }
+    void forward() override final;
+
     void backward() override final {
-        fmt::print("Basic Producer backward() function.\n");
+        // fmt::print("Basic Producer backward() function.\n");
     }
     void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
         if (getAttr<ProdAttr::Constant>()) {
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index ab27e4e0233052f7cc155ed0375175a27d3edcf5..ff8d8b0696aafdab48cd37d049fa0473078d7ea6 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -69,7 +69,7 @@ class ReduceMean_Op : public OperatorTensor,
         return std::make_shared<ReduceMean_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 060029bb87ea142728056b3817b8162d566cb458..49ddfc4d76a0602c58c0c768b04ed4b4202f028d 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -23,6 +23,11 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Reshape_OpImpl : public OperatorImpl {
+public:
+    Reshape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 enum class ReshapeAttr { Shape };
 
@@ -42,7 +47,9 @@ public:
     Reshape_Op(const std::vector<std::int64_t>& shape)
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<ReshapeAttr::Shape>(shape))
-    {}
+    {
+        mImpl = std::make_shared<Reshape_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -52,10 +59,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Reshape_OpImpl>(*this);
         }
     }
 
@@ -67,7 +75,7 @@ public:
         return std::make_shared<Reshape_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 8f54ab217631ac69a4e16555f8e58f550ab0156c..c864bd045d8a5a1fc5f4ee591d1d81fcaf241bac 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -27,9 +27,10 @@ enum class ScalingAttr {
     scalingFactor, quantizedNbBits, isOutputUnsigned
 };
 
-class Scaling_Op : public OperatorTensor,
-    public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
-    public StaticAttributes<ScalingAttr, float, size_t, bool> {
+class Scaling_Op
+    : public OperatorTensor,
+      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)>,
+      public StaticAttributes<ScalingAttr, float, size_t, bool> {
 public:
     static const std::string Type;
 
@@ -84,7 +85,11 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
 }
 */
-inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, std::size_t quantizedNbBits=8, bool isOutputUnsigned=true, const std::string& name = "") {
+inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
+                                     std::size_t quantizedNbBits=8,
+                                     bool isOutputUnsigned=true,
+                                     const std::string& name = "")
+{
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index f68aa17f480038d8ff7850577c438cfdc6704d59..757e08fe97dd1cc572c08ac7c2b454daa234bdc1 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -24,6 +24,12 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Slice_OpImpl : public OperatorImpl {
+public:
+    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
 enum class SliceAttr { Starts, Ends, Axes };
 
 class Slice_Op
@@ -44,7 +50,9 @@ public:
           Attributes_(attr<SliceAttr::Starts>(starts),
                       attr<SliceAttr::Ends>(ends),
                       attr<SliceAttr::Axes>(axes))
-    {}
+    {
+        mImpl = std::make_shared<Slice_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
@@ -55,10 +63,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Slice_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Slice_OpImpl>(*this);
         }
     }
 
@@ -69,12 +78,9 @@ public:
      */
     std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Slice_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index fbcebcc9f62c23e9c60b5dff6f0d41c10d8b8717..e5d8442851c35e9232fdd77d862fb48b71c76f1f 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -57,7 +57,7 @@ public:
         return std::make_shared<Sub_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 1beb5781b9262669cd2acb6ce4ef3aae85843573..16ac2794a283d817f6a4e1586349e55ec626167e 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -26,40 +26,47 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Transpose_OpImpl : public OperatorImpl {
+public:
+    Transpose_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
 enum class TransposeAttr { OutputDimsOrder };
 
-template <DimIdx_t DIM>
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
-                public StaticAttributes<TransposeAttr,
-                                       std::array<DimSize_t, DIM>> {
+                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)>,
+                public StaticAttributes<TransposeAttr, std::vector<DimSize_t>> {
 
    public:
     static const std::string Type;
 
     Transpose_Op() = delete;
 
-    using Attributes_ = StaticAttributes<TransposeAttr,
-                                             std::array<DimSize_t, DIM>>;
+    using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
     template <TransposeAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr Transpose_Op(const std::array<DimSize_t, DIM> &output_dims_order)
+    Transpose_Op(const std::vector<DimSize_t> &output_dims_order)
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order)) { }
+          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order))
+    {
+        mImpl = std::make_shared<Transpose_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Transpose_Op(const Transpose_Op<DIM>& op)
+    Transpose_Op(const Transpose_Op& op)
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.backend());
-        }else{
-            mImpl = nullptr;
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
+        }
+        else {
+            mImpl = std::make_shared<Transpose_OpImpl>(*this);
         }
     }
 
@@ -68,25 +75,12 @@ class Transpose_Op : public OperatorTensor,
      * @see Operator::Transpose_Op
      */
     std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Transpose_Op<DIM>>(*this);
+        return std::make_shared<Transpose_Op>(*this);
     }
 
-    void computeOutputDims() override final {
-        if (!getInput(0)->empty()) {
-            auto attr = (this)->getStaticAttributes();
-            const std::array<DimSize_t, DIM>& outDimsOrder = static_cast<const std::array<DimSize_t, DIM>&>(std::get<0>(attr));
-            std::vector<DimSize_t> outputDims;
-            for (std::size_t i = 0; i < DIM; ++i) {
-                outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
-            }
-            mOutputs[0]->resize(outputDims);
-        }
-    }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Transpose_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -96,26 +90,10 @@ class Transpose_Op : public OperatorTensor,
     }
 };
 
-template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Transpose(const std::array<DimSize_t, DIM> &output_dims_order,
+inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &output_dims_order,
                                            const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
-    return std::make_shared<Node>(std::make_shared<Transpose_Op<static_cast<DimIdx_t>(DIM)>>(output_dims_order), name);
-}
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
-template <DimSize_t DIM>
-inline std::shared_ptr<Node> Transpose(
-    DimSize_t const (&output_dims_order)[DIM],
-    const std::string& name = "") {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
-    return Transpose(to_array(output_dims_order), name);
+    return std::make_shared<Node>(std::make_shared<Transpose_Op>(output_dims_order), name);
 }
-
-template <DimIdx_t DIM>
-const std::string Transpose_Op<DIM>::Type = "Transpose";
-
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/scheduler/ParallelScheduler.hpp b/include/aidge/scheduler/ParallelScheduler.hpp
index 0b6f963d61bf0079a9a32bd335ba765788aba2a5..abacebf4e0c45130bb0e41872577052cfe0a176c 100644
--- a/include/aidge/scheduler/ParallelScheduler.hpp
+++ b/include/aidge/scheduler/ParallelScheduler.hpp
@@ -37,7 +37,7 @@ public:
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    virtual void forward(bool forwardDims = true, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
+    virtual void forward(bool forwardDims = true, const std::vector<std::shared_ptr<Aidge::Tensor>>& data = {});
 };
 } // namespace Aidge
 
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 2f8fbb7aeb6562e0dd309f8f53def6d0fed5a08a..792d73693be0780f2e938d828b0f29889216631b 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -114,7 +114,7 @@ public:
      *
      * @param data data input tensors
      */
-    void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data);
+    void connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data);
 
     /**
      * @brief Save in a Markdown file the static scheduling with early and late relative order for the nodes.
diff --git a/include/aidge/scheduler/SequentialScheduler.hpp b/include/aidge/scheduler/SequentialScheduler.hpp
index 9cf0c2c1877bbbe5930c6b1e39f2a46c33e21d93..a7929fde8a2affdd562d70d11a7c809aaf3357d0 100644
--- a/include/aidge/scheduler/SequentialScheduler.hpp
+++ b/include/aidge/scheduler/SequentialScheduler.hpp
@@ -49,12 +49,12 @@ public:
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    virtual void forward(bool forwardDims = true, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
+    virtual void forward(bool forwardDims = true, const std::vector<std::shared_ptr<Aidge::Tensor>>& data = {});
 
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instantiateGrad = true);
+    void backward(bool instantiateGrad = true);
 
 private:
     SchedulingPolicy mSchedulingPolicy;
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 44c3b1f5e8df833344fa9b7fe72bdb4ef1e0ec12..113377b33d9827c3428eeb0adc92111f75c22abb 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -21,6 +21,7 @@
 
 #include "aidge/utils/future_std/any.hpp"
 #include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -86,7 +87,7 @@ public:
     template<class T> void addAttr(const std::string& name, const T& value)
     {
         const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
-        assert(res.second && "attribute already exists");
+        AIDGE_ASSERT(res.second, "attribute already exists");
 
 #ifdef PYBIND
         // We cannot handle Python object if the Python interpreter is not running
@@ -129,10 +130,10 @@ public:
     void addAttrPy(const std::string& name, py::object&& value)
     {
         auto it = mAttrs.find(name);
-        assert(it == mAttrs.end() && "attribute already exists");
+        AIDGE_ASSERT(it == mAttrs.end(), "attribute already exists");
 
         const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
-        assert(res.second && "attribute already exists");
+        AIDGE_ASSERT(res.second, "attribute already exists");
     }
 
     void setAttrPy(const std::string& name, py::object&& value) override final
@@ -199,6 +200,8 @@ public:
     };
 #endif
 
+    virtual ~DynamicAttributes() {}
+
 private:
 #ifdef PYBIND
     // Stores C++ attributes (copy) and Python-only attributes
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index a6d1d7a9eb5d88dedaf73564847b0f4fbd797c43..b0acdaff7cb75afec78f0564fb95c98f2b32f47b 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -129,16 +129,16 @@ void declare_registrable(py::module& m, const std::string& class_name){
 *   cyril.moineau@cea.fr
 */
 #ifdef PYBIND
-#define SET_IMPL_MACRO(T_Op, op, backend_name) \
+#define SET_IMPL_MACRO(T_Op, op, ...) \
     if(Py_IsInitialized()) { \
         auto obj = py::cast(&(op)); \
-        (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \
+        (op).setImpl(Registrar<T_Op>::create(__VA_ARGS__)(op)); \
     } else { \
-        (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \
+        (op).setImpl(Registrar<T_Op>::create(__VA_ARGS__)(op)); \
     }
 #else
-#define SET_IMPL_MACRO(T_Op, op, backend_name)                   \
-    (op).setImpl(Registrar<T_Op>::create(backend_name)(op));
+#define SET_IMPL_MACRO(T_Op, op, ...)                   \
+    (op).setImpl(Registrar<T_Op>::create(__VA_ARGS__)(op));
 #endif
 
 }
diff --git a/python_binding/data/pybind_Database.cpp b/python_binding/data/pybind_Database.cpp
index 903e692ca3d14d6ae25f0d6f151b1b08d557d924..4bc28a19d350236933c3b6c139e9e3a4d980fa3f 100644
--- a/python_binding/data/pybind_Database.cpp
+++ b/python_binding/data/pybind_Database.cpp
@@ -1,13 +1,40 @@
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
 #include "aidge/data/Database.hpp"
+#include "aidge/data/Tensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
-void init_Database(py::module& m){
+/**
+ * @brief Pybind11 trampoline class, allowing Database to be subclassed and its virtual methods overridden from Python.
+ *
+ */
+class pyDatabase : public Database {
+   public:
+    using Database::Database;  // Inherit constructors
 
-    py::class_<Database, std::shared_ptr<Database>>(m,"Database");
+    std::vector<std::shared_ptr<Tensor>> getItem(
+        const std::size_t index) const override {
+        PYBIND11_OVERRIDE_PURE_NAME(std::vector<std::shared_ptr<Tensor>>, Database,
+                               "get_item", getItem, index);
+    }
+    std::size_t getLen() const noexcept override {
+        PYBIND11_OVERRIDE_PURE_NAME(std::size_t, Database, "len", getLen);
+    }
+    std::size_t getNbModalities() const noexcept override {
+        PYBIND11_OVERRIDE_PURE_NAME(std::size_t, Database, "get_nb_modalities",
+                               getNbModalities);
+    }
+};
 
-    
-}
+void init_Database(py::module& m) {
+    py::class_<Database, std::shared_ptr<Database>, pyDatabase>(
+        m, "Database", py::dynamic_attr())
+        .def(py::init<>())
+        .def("get_item", &Database::getItem)
+        .def("len", &Database::getLen)
+        .def("get_nb_modalities", &Database::getNbModalities);
 }
+}  // namespace Aidge
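
With the trampoline in place, Database can be subclassed directly in Python by overriding get_item, len and get_nb_modalities. A minimal sketch, in which the class name and the samples layout (a list of lists of aidge_core.Tensor) are illustrative assumptions:

    import aidge_core

    class InMemoryDatabase(aidge_core.Database):
        def __init__(self, samples):
            aidge_core.Database.__init__(self)
            self.samples = samples                  # e.g. [[data, label], ...]

        def get_item(self, index):                  # overrides Database::getItem
            return self.samples[index]

        def len(self):                              # overrides Database::getLen
            return len(self.samples)

        def get_nb_modalities(self):                # overrides Database::getNbModalities
            return len(self.samples[0]) if self.samples else 0
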
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index b97af94ad583cf42e25fa3afc0697021f6dcadcc..3c2120565e1637697e5258723b1b366a520fdf80 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -77,7 +77,9 @@ void init_Tensor(py::module& m){
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
     .def("grad", &Tensor::grad)
+    .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
+    .def("init_grad", &Tensor::initGrad)
     .def("size", &Tensor::size)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
     .def("has_impl", &Tensor::hasImpl)
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 953ec981e06e8c4050ca24143ff832e9f7112f70..1000374454020625aada7f2043893b229deec833 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -114,10 +114,10 @@ void init_GraphView(py::module& m) {
           :return: Whether any replacement has been made.
           :rtype: bool
           )mydelimiter")
-
+          .def("clone", &GraphView::clone)
           .def("get_nodes", &GraphView::getNodes)
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
-          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
+          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>(), py::arg("allow_data_dependency") = false)
           .def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 116b9dc404861bfba813c4961db8b6c457fae154..b22ebdd0f6cdb5bd738cd164b3fc2e9fe36d9987 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -28,7 +28,7 @@ void init_Node(py::module& m) {
     R"mydelimiter(
     Name of the Node.
     )mydelimiter")
-
+    .def("clone", (NodePtr (Node::*)() const) &Node::clone)
     .def("type", &Node::type,
     R"mydelimiter(
     Type of the node.
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 31ee946fc99df40133ff04965c762f9ddae0d131..897cd359a4b368dc599f37136ade3508b5ec5a76 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -25,7 +25,7 @@ void init_GenericOperator(py::module& m) {
     py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
     .def_readonly_static("identity", &GenericOperator_Op::Identity)
-    .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
+    .def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function"));
 
     // &GenericOperator
     m.def("GenericOperator",
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 4796917fbe34dbf3b7455841c9e3f1c13ca9c64d..e00f70413614a96919c2a068303b3fbc3f6eca8d 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -25,6 +25,7 @@ namespace py = pybind11;
 namespace Aidge {
 void init_Operator(py::module& m){
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
+    .def("backend", &Operator::backend)
     .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
index c56e80a47e1142900ff844e7d9889011dee65060..4d4541ab36468bc6b531e0242888dd70c5afc71f 100644
--- a/python_binding/operator/pybind_OperatorTensor.cpp
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -30,8 +30,8 @@ void init_OperatorTensor(py::module& m){
 
     .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
     .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data"))
-    .def("compute_output_dims", &OperatorTensor::computeOutputDims)
-    .def("output_dims_forwarded", &OperatorTensor::outputDimsForwarded)
+    .def("forward_dims", &OperatorTensor::forwardDims, py::arg("allow_data_dependency") = false)
+    .def("dims_forwarded", &OperatorTensor::dimsForwarded)
     ;
 }
 }
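
On the Python side, compute_output_dims/output_dims_forwarded become forward_dims/dims_forwarded. A sketch of the renamed calls, assuming my_node is an existing aidge_core.Node and that get_operator() is available on it (not shown in this patch):

    op = my_node.get_operator()
    op.forward_dims(allow_data_dependency=False)
    assert op.dims_forwarded()
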
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f091ea70f9b5e9927e535bd527cd84cf081d9823
--- /dev/null
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -0,0 +1,32 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Scaling.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+void init_Scaling(py::module& m) 
+{
+    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, Attributes, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Scaling_Op::getInputsName)
+    .def("get_outputs_name", &Scaling_Op::getOutputsName)
+    .def("attributes_name", &Scaling_Op::staticGetAttrsName);
+    declare_registrable<Scaling_Op>(m, "ScalingOp");
+    m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = "");
+}
+
+}  // namespace Aidge
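
The new binding exposes the Scaling factory with the defaults declared above; a minimal sketch, where the scaling factor and node name are illustrative values:

    import aidge_core

    scaling_node = aidge_core.Scaling(scaling_factor=0.5, nb_bits=8,
                                      is_output_unsigned=True, name="rescale")
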
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index f6e2f2225e4858d3385c5d0140a863e7e7705652..63b22608d1737f9a59caffd4517fc0e9cfc4dd91 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -25,32 +25,19 @@
 namespace py = pybind11;
 namespace Aidge {
 
-template <DimIdx_t DIM>
 void declare_Transpose(py::module &m) {
-  const std::string pyClassName("TransposeOp" + std::to_string(DIM) + "D");
-  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
-  .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
-  .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
-
-  declare_registrable<Transpose_Op<DIM>>(m, pyClassName);
-
-  m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
-                                                                  const std::string& name) {
-        AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM);
-        return Transpose<DIM>(to_array<DIM>(output_dims_order.begin()), name);
-    }, py::arg("output_dims_order"),
-       py::arg("name") = "");
-
+  const std::string pyClassName("TransposeOp");
+  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, Attributes, OperatorTensor>(
+    m, "TransposeOp", py::multiple_inheritance())
+  .def("get_inputs_name", &Transpose_Op::getInputsName)
+  .def("get_outputs_name", &Transpose_Op::getOutputsName)
+  .def("attributes_name", &Transpose_Op::staticGetAttrsName);
+  declare_registrable<Transpose_Op>(m, pyClassName);
+  m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
 }
 
 void init_Transpose(py::module &m) {
-  declare_Transpose<2>(m);
-  declare_Transpose<3>(m);
-  declare_Transpose<4>(m);
-  declare_Transpose<5>(m);
-  declare_Transpose<6>(m);
+  declare_Transpose(m);
 
 }
 } // namespace Aidge
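
Since Transpose_Op is no longer templated on the number of dimensions, a single Transpose factory now covers every rank from Python. A minimal sketch, where the permutation (here NCHW to NHWC) and the node name are illustrative:

    import aidge_core

    transpose_node = aidge_core.Transpose(output_dims_order=[0, 2, 3, 1], name="to_nhwc")
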
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 63e5100ac65b5582c7236c2b3467a7d1debcaa36..7b38c2d72d5f4b2eed8d8bbf9f41f47144b51060 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -51,6 +51,7 @@ void init_Pow(py::module&);
 void init_ReduceMean(py::module&);
 void init_ReLU(py::module&);
 void init_Reshape(py::module&);
+void init_Scaling(py::module&);
 void init_Sigmoid(py::module&);
 void init_Slice(py::module&);
 void init_Softmax(py::module&);
@@ -72,6 +73,7 @@ void init_Recipes(py::module&);
 void init_GraphViewHelper(py::module&);
 
 void init_Scheduler(py::module&);
+void init_MemoryManager(py::module&);
 void init_TensorUtils(py::module&);
 void init_Filler(py::module&);
 
@@ -117,6 +119,7 @@ void init_Aidge(py::module& m) {
     init_ReduceMean(m);
     init_ReLU(m);
     init_Reshape(m);
+    init_Scaling(m);
     init_Sigmoid(m);
     init_Slice(m);
     init_Softmax(m);
@@ -134,6 +137,7 @@ void init_Aidge(py::module& m) {
     init_Recipes(m);
     init_GraphViewHelper(m);
     init_Scheduler(m);
+    init_MemoryManager(m);
     init_TensorUtils(m);
     init_Filler(m);
 }
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp
index ac56fb4b43eb5b0a737157ec9e64c6771a692816..e65b790d3eba6072e3e1b112c7d841959d4a5672 100644
--- a/python_binding/recipes/pybind_GraphViewHelper.cpp
+++ b/python_binding/recipes/pybind_GraphViewHelper.cpp
@@ -24,5 +24,6 @@ namespace py = pybind11;
 namespace Aidge {
 void init_GraphViewHelper(py::module &m) {
     m.def("producers", &producers, py::arg("graphview"));
+    m.def("compile_gradient", &compile_gradient, py::arg("graphview"));
 }
 } // namespace Aidge
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index f122c411618ce28a641fd46ee568f99cc48e9f58..b85d1c41ed90a5774a9b24062dfda4186c2294d5 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -21,66 +21,70 @@
 namespace py = pybind11;
 
 namespace Aidge {
-void init_Recipes(py::module &m) {
+void init_Recipes(py::module &m) 
+{
 
 
   m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
-    Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+    Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
-    :param graph_view: Graph view on which we want to apply the recipie
+    :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
   // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
-  //   Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+  //   Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
   //   :param nodes: The MatMul and Add nodes to fuse.
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
   m.def("remove_dropout",static_cast<void(*)(std::shared_ptr<GraphView>)>(removeDropout), py::arg("graph_view"), R"mydelimiter(
-    Recipie to remove a dropout operator.
+    Recipe to remove a dropout operator.
 
-    :param graph_view: Graph view on which we want to apply the recipie
+    :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
   m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
-    Recipie to remove a flatten operator.
+    Recipe to remove a flatten operator.
 
-    :param graph_view: Graph view on which we want to apply the recipie
+    :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
   // m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
-  //   Recipie to remove a flatten operator.
+  //   Recipe to remove a flatten operator.
 
   //   :param nodes: The flatten operator to remove.
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
   // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
-  //   Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+  //   Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
   //   :param nodes: The MatMul and Add nodes to fuse.
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
   m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
-    Recipie to remove a flatten operator.
+    Recipe to fuse BatchNorm operators into the weights of their preceding operators (e.g. Conv).
 
-    :param graph_view: Graph view on which we want to apply the recipie
+    :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
- m.def("get_conv_horizontal_tiling", static_cast<std::set<std::shared_ptr<Node>>(*)(const std::shared_ptr<Node>&, const DimIdx_t, const std::size_t)>(getConvHorizontalTiling),
+  m.def("get_conv_horizontal_tiling", static_cast<std::set<std::shared_ptr<Node>>(*)(const std::shared_ptr<Node>&, const DimIdx_t, const std::size_t)>(getConvHorizontalTiling),
         py::arg("node"), py::arg("axis"), py::arg("nb_slices"));
 
   // m.def("fuse_batchnorm", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseBatchNorm), py::arg("nodes"), R"mydelimiter(
-  //   Recipie to remove a flatten operator.
+  //   Recipe to remove a flatten operator.
 
   //   :param nodes: The flatten operator to remove.
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
+
+  m.def("expand_metaops", static_cast<void(*)(std::shared_ptr<GraphView>, bool)>(expandMetaOps), py::arg("graph_view"), py::arg("recursive") = false);
 }
+
 } // namespace Aidge
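
A short sketch chaining the bound recipes, assuming graph is an existing aidge_core.GraphView (for instance one imported from ONNX):

    aidge_core.remove_flatten(graph)
    aidge_core.fuse_mul_add(graph)
    aidge_core.fuse_batchnorm(graph)
    aidge_core.expand_metaops(graph, recursive=False)
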
diff --git a/python_binding/scheduler/pybind_MemoryManager.cpp b/python_binding/scheduler/pybind_MemoryManager.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0f18db405bec0aee9637f2e5f2ecc7b71e502cc5
--- /dev/null
+++ b/python_binding/scheduler/pybind_MemoryManager.cpp
@@ -0,0 +1,108 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "aidge/scheduler/MemoryManager.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+void init_MemoryManager(py::module& m)
+{
+    py::enum_<MemoryManager::OptimizeStrategy>(m, "OptimizeStrategy")
+        .value("None", MemoryManager::OptimizeStrategy::None)
+        .value("OptimizeMaxLifetimeMinSizeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxLifetimeMinSizeFirst)
+        .value("OptimizeMaxLifetimeMaxSizeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxLifetimeMaxSizeFirst)
+        .value("OptimizeMaxHoleMaxLifetimeFirst", MemoryManager::OptimizeStrategy::OptimizeMaxHoleMaxLifetimeFirst)
+        .export_values();
+
+    py::class_<MemoryManager::MemorySpace, std::shared_ptr<MemoryManager::MemorySpace>>(m, "MemorySpace")
+        .def(py::init<MemoryManager::Clock_T, unsigned int, unsigned int, std::set<std::shared_ptr<Node>> >(), py::arg("clock"), py::arg("offset"), py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>())
+        .def_readwrite("offset", &MemoryManager::MemorySpace::offset)
+        .def_readwrite("size", &MemoryManager::MemorySpace::size)
+        .def_readwrite("dependencies", &MemoryManager::MemorySpace::dependencies)
+        .def_readwrite("allocated", &MemoryManager::MemorySpace::allocated)
+        .def_readwrite("released", &MemoryManager::MemorySpace::released);
+
+    py::class_<MemoryManager::MemoryPlane, std::shared_ptr<MemoryManager::MemoryPlane>>(m, "MemoryPlane")
+        .def(py::init<std::shared_ptr<MemoryManager::MemorySpace>, 
+                      MemoryManager::Clock_T, unsigned int, unsigned int,
+                      unsigned int, unsigned int, unsigned int>(),
+                      py::arg("mem_space"), py::arg("clock"), py::arg("offset"), 
+                      py::arg("size"), py::arg("stride"), py::arg("length"), py::arg("count"))
+        .def_readwrite("mem_space", &MemoryManager::MemoryPlane::memSpace)
+        .def_readwrite("allocated", &MemoryManager::MemoryPlane::allocated)
+        .def_readwrite("offset", &MemoryManager::MemoryPlane::offset)
+        .def_readwrite("size", &MemoryManager::MemoryPlane::size)
+        .def_readwrite("stride", &MemoryManager::MemoryPlane::stride)
+        .def_readwrite("length", &MemoryManager::MemoryPlane::length)
+        .def_readwrite("count", &MemoryManager::MemoryPlane::count)
+        .def("get_size", &MemoryManager::MemoryPlane::getSize)
+        .def("get_useful_size", &MemoryManager::MemoryPlane::getUsefulSize)
+        .def("get_contiguous_offset", &MemoryManager::MemoryPlane::getContiguousOffset)
+        .def("get_contiguous_size", &MemoryManager::MemoryPlane::getContiguousSize)
+        .def("get_wrapped_offset", &MemoryManager::MemoryPlane::getWrappedOffset)
+        .def("get_wrapped_size", &MemoryManager::MemoryPlane::getWrappedSize)
+        .def("get_final_offset", &MemoryManager::MemoryPlane::getFinalOffset)
+        .def("get_upper_offset", &MemoryManager::MemoryPlane::getUpperOffset)
+        .def("get_limit", &MemoryManager::MemoryPlane::getLimit);
+
+    py::class_<MemoryManager::MaxLifetimeMinSizeFirst>(m, "MaxLifetimeMinSizeFirst")
+        .def(py::init<unsigned int>(), py::arg("max_lifetime"))
+        .def_readonly("max_lifetime", &MemoryManager::MaxLifetimeMinSizeFirst::maxLifetime)
+        .def("__call__", &MemoryManager::MaxLifetimeMinSizeFirst::operator(), py::arg("p0"), py::arg("p1"));
+
+    py::class_<MemoryManager::MaxLifetimeMaxSizeFirst>(m, "MaxLifetimeMaxSizeFirst")
+        .def(py::init<unsigned int>(), py::arg("max_lifetime"))
+        .def_readonly("max_lifetime", &MemoryManager::MaxLifetimeMaxSizeFirst::maxLifetime)
+        .def("__call__", &MemoryManager::MaxLifetimeMaxSizeFirst::operator(), py::arg("p0"), py::arg("p1"));
+
+    py::class_<MemoryManager::MaxHoleMaxLifetimeFirst>(m, "MaxHoleMaxLifetimeFirst")
+        .def(py::init<unsigned int, MemoryManager*>(), py::arg("max_lifetime"), py::arg("inst"))
+        .def_readonly("max_lifetime", &MemoryManager::MaxHoleMaxLifetimeFirst::maxLifetime)
+        .def_readwrite("inst", &MemoryManager::MaxHoleMaxLifetimeFirst::inst)
+        .def("__call__", &MemoryManager::MaxHoleMaxLifetimeFirst::operator(), py::arg("p0"), py::arg("p1"));
+
+    py::class_<MemoryManager, std::shared_ptr<MemoryManager>>(m, "MemoryManager")
+        .def(py::init<>())
+        .def("reserve", (std::shared_ptr<MemoryManager::MemorySpace> (MemoryManager::*)(unsigned int, const std::set<std::shared_ptr<Node>>&)) &MemoryManager::reserve, py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>())
+        .def("expand", &MemoryManager::expand, py::arg("mem_space"), py::arg("required_size"))
+        .def("allocate", (MemoryManager::MemoryPlane (MemoryManager::*)(unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::allocate, py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("allocate", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::allocate, py::arg("node"), py::arg("size"), py::arg("dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("is_wrap_around", &MemoryManager::isWrapAround, py::arg("mem_space"), py::arg("offset"), py::arg("size"), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("reallocate", (MemoryManager::MemoryPlane (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_space"), py::arg("offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("reallocate", (MemoryManager::MemoryPlane (MemoryManager::*)(const MemoryManager::MemoryPlane&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("memPlane"), py::arg("extra_offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("reallocate", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>, const std::shared_ptr<Node>&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_space"), py::arg("node"), py::arg("offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("reallocate", (unsigned int (MemoryManager::*)(const MemoryManager::MemoryPlane&, const std::shared_ptr<Node>&, unsigned int, unsigned int, bool, unsigned int, const std::set<std::shared_ptr<Node>>&, unsigned int, unsigned int, unsigned int)) &MemoryManager::reallocate, py::arg("mem_plane"), py::arg("node"), py::arg("extra_offset"), py::arg("size"), py::arg("wrap_around"), py::arg("extra_size") = 0, py::arg("additional_dependencies") = std::set<std::shared_ptr<Node>>(), py::arg("stride") = 0, py::arg("length") = 1, py::arg("count") = 1)
+        .def("release", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>)) &MemoryManager::release, py::arg("mem_space"))
+        .def("release", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&)) &MemoryManager::release, py::arg("node"))
+        .def("release_dependencies", &MemoryManager::releaseDependencies, py::arg("node"))
+        .def("optimize", &MemoryManager::optimize, py::arg("strategy"))
+        .def("get_offset", &MemoryManager::getOffset, py::arg("node"), py::arg("plane") = 0)
+        .def("get_size", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&, unsigned int) const) &MemoryManager::getSize, py::arg("node"), py::arg("plane"))
+        .def("get_size", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getSize, py::arg("node"))
+        .def("get_peak_usage", &MemoryManager::getPeakUsage)
+        .def("get_max_lifetime", &MemoryManager::getMaxLifetime)
+        .def("get_planes", (const std::vector<MemoryManager::MemoryPlane>& (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getPlanes, py::arg("node"))
+        .def("get_planes", (const MemoryManager::MemMap_T& (MemoryManager::*)() const) &MemoryManager::getPlanes)
+        .def("get_planes", (MemoryManager::MemMap_T (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>) const) &MemoryManager::getPlanes, py::arg("mem_space"))
+        .def("get_nb_planes", (unsigned int (MemoryManager::*)(const std::shared_ptr<Node>&) const) &MemoryManager::getNbPlanes, py::arg("node"))
+        .def("get_nb_planes", (unsigned int (MemoryManager::*)(std::shared_ptr<MemoryManager::MemorySpace>) const) &MemoryManager::getNbPlanes, py::arg("mem_space"))
+        .def("get_current_tick", &MemoryManager::getCurrentTick)
+        .def("tick", &MemoryManager::tick)
+        .def("log", &MemoryManager::log, py::arg("file_name"))
+        ;
+}
+
+}   // Aidge
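
The MemoryManager binding mirrors the C++ API above. A minimal sketch of manual use; sizes are in backend-defined units and the values are illustrative (in practice the manager is typically produced by Scheduler.generate_memory, shown further below):

    import aidge_core

    mem = aidge_core.MemoryManager()
    space = mem.reserve(1024)        # reserve a 1024-unit memory space
    mem.tick()                       # advance the logical clock by one step
    mem.release(space)               # release(mem_space) overload
    print(mem.get_peak_usage())
    mem.log("memory_info")           # output file name is illustrative
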
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index c0966e54d4f025a607aa9763a3657de5b39d2ff4..b16134da324383a4542965393257288c49dceed0 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -11,6 +11,7 @@
 
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
+#include "aidge/scheduler/MemoryManager.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
 #include "aidge/scheduler/ParallelScheduler.hpp"
@@ -22,16 +23,18 @@ namespace Aidge {
 void init_Scheduler(py::module& m){
     py::class_<Scheduler, std::shared_ptr<Scheduler>>(m, "Scheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
+    .def("graph_view", &Scheduler::graphView)
     .def("save_scheduling_diagram", &Scheduler::saveSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
     .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
+    .def("generate_memory", &Scheduler::generateMemory, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false)
     ;
 
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("data")=std::vector<Tensor>())
-    .def("backward", &SequentialScheduler::backward, py::arg("data"), py::arg("instanciate_grad")=true)
+    .def("backward", &SequentialScheduler::backward, py::arg("instanciate_grad")=true)
     ;
 
     py::class_<ParallelScheduler, std::shared_ptr<ParallelScheduler>, Scheduler>(m, "ParallelScheduler")
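
The scheduler bindings now expose graph_view, generate_memory, and a backward that no longer takes a data argument. An end-to-end sketch, assuming graph is a compiled aidge_core.GraphView, inputs is a list of aidge_core.Tensor, and that generate_memory returns the MemoryManager (return type not shown in this patch):

    scheduler = aidge_core.SequentialScheduler(graph)
    scheduler.forward(forward_dims=True, data=inputs)
    scheduler.generate_scheduling()
    mem_manager = scheduler.generate_memory(inc_producers=False, wrap_around_buffer=False)

    aidge_core.compile_gradient(graph)           # allocate gradient tensors
    scheduler.backward(instanciate_grad=True)    # backward no longer takes data
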
diff --git a/requirements.txt b/requirements.txt
index 24ce15ab7ead32f98c7ac3edcd34bb2010ff4326..32ec29bb9b826038eb21ce2927f2fef08973b2b8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1,2 @@
 numpy
+Jinja2
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index b6aa4f2e50a5a3db8c3965a8e618fcf4f0299fe8..677bd0246e145ebf760f210000728bd2d99a3807 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -23,29 +23,26 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
         return *this;
     }
     resize(other.dims(), other.strides());
-    setDataType(other.dataType(), false); // do not convert existing data
+    setDataType(other.dataType(), false);  // do not convert existing data
     if (other.hasImpl()) {
         if (hasImpl()) {
             copyFrom(other);
-        }
-        else {
+        } else {
             // Perform a shallow copy only
             setImpl(other.mImpl, other.mImplOffset);
         }
-    }
-    else {
+    } else {
         setImpl(nullptr);
     }
     return *this;
 }
 
-
 Aidge::Tensor::~Tensor() noexcept = default;
 
-
-void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
+void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
+                           std::vector<Aidge::DimSize_t> strides) {
     // TODO: scalar Tensor not handled
-    if (dims.empty()) { // scalar
+    if (dims.empty()) {  // scalar
         mDims = std::vector<DimSize_t>(0);
         mStrides = std::vector<DimSize_t>({1});
         mContiguous = true;
@@ -63,20 +60,21 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto
         size_t expectedStride = 1;
         for (int dim = dims.size() - 1; dim >= 0; --dim) {
             strides[dim] = expectedStride;
-            expectedStride*= dims[dim];
+            expectedStride *= dims[dim];
         }
         checkContiguous = false;
-    }
-    else {
-        AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
+    } else {
+        AIDGE_ASSERT(strides.size() == dims.size(),
+                     "Number of strides must match number of dims");
     }
 
     if (mImpl && mImpl.use_count() > 1) {
         // Here we could also create a new storage for this tensor in this case
-        // But, is it more likely that the user really wants this, or that he did a mistake?
-        AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
-    }
-    else {
+        // But, is it more likely that the user really wants this, or that
+        // they made a mistake?
+        AIDGE_ASSERT(dims == mDims && strides == mStrides,
+                     "Cannot resize Tensor with shared storage");
+    } else {
         mDims = dims;
         mStrides = strides;
 
@@ -88,12 +86,12 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto
             //     mContiguous&= (strides[i] == expectedStride);
             //     expectedStride*= dims[i];
             // }
-            for (std::size_t i = dims.size()-1; i > 0; --i) {
+            for (std::size_t i = dims.size() - 1; i > 0; --i) {
                 if (strides[i] != expectedStride) {
                     mContiguous = false;
                     break;
                 }
-                expectedStride*= dims[i];
+                expectedStride *= dims[i];
             }
             mContiguous &= (strides[0] == expectedStride);
         }
@@ -106,53 +104,59 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto
 }
 
 std::string Aidge::Tensor::toString() const {
-    AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
+    AIDGE_ASSERT(
+        mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) ||
+                  (mImpl->hostPtr() != nullptr)),
+        "tensor should have a valid host pointer");
 
     // TODO: move lambda elsewhere?
     auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
         switch (dt) {
-        case DataType::Float64:
-            return std::to_string(static_cast<double*>(ptr)[idx]);
-        case DataType::Float32:
-            return std::to_string(static_cast<float*>(ptr)[idx]);
-        case DataType::Float16:
-            return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
-        case DataType::Int8:
-            return std::to_string(static_cast<int8_t*>(ptr)[idx]);
-        case DataType::Int16:
-            return std::to_string(static_cast<int16_t*>(ptr)[idx]);
-        case DataType::Int32:
-            return std::to_string(static_cast<int32_t*>(ptr)[idx]);
-        case DataType::Int64:
-            return std::to_string(static_cast<int64_t*>(ptr)[idx]);
-        case DataType::UInt8:
-            return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
-        case DataType::UInt16:
-            return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
-        case DataType::UInt32:
-            return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
-        case DataType::UInt64:
-            return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
-        default:
-            AIDGE_ASSERT(true, "unsupported type to convert to string");
+            case DataType::Float64:
+                return std::to_string(static_cast<double*>(ptr)[idx]);
+            case DataType::Float32:
+                return std::to_string(static_cast<float*>(ptr)[idx]);
+            case DataType::Float16:
+                return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
+            case DataType::Int8:
+                return std::to_string(static_cast<int8_t*>(ptr)[idx]);
+            case DataType::Int16:
+                return std::to_string(static_cast<int16_t*>(ptr)[idx]);
+            case DataType::Int32:
+                return std::to_string(static_cast<int32_t*>(ptr)[idx]);
+            case DataType::Int64:
+                return std::to_string(static_cast<int64_t*>(ptr)[idx]);
+            case DataType::UInt8:
+                return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
+            case DataType::UInt16:
+                return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
+            case DataType::UInt32:
+                return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
+            case DataType::UInt64:
+                return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
+            default:
+                AIDGE_ASSERT(false, "unsupported type to convert to string");
         }
         return std::string("?");  // To make Clang happy
     };
 
-    if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
+    if (dims().empty()) {
+        return ptrToString(mDataType, mImpl->hostPtr(), 0);
+    }
     std::string res;
     std::size_t dim = 0;
     std::size_t counter = 0;
-    if (nbDims()>=2) {
+    if (nbDims() >= 2) {
         std::vector<std::size_t> dimVals(nbDims(), 0);
         res += "{\n";
         while (counter < mSize) {
-            std::string spaceString = std::string((dim+1)<<1,' ');
-            if (dim < nbDims()-2) {
+            std::string spaceString = std::string((dim + 1) << 1, ' ');
+            if (dim < nbDims() - 2) {
                 if (dimVals[dim] == 0) {
                     res += spaceString + "{\n";
                     ++dim;
-                } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
+                } else if (dimVals[dim] <
+                           static_cast<std::size_t>(dims()[dim])) {
                     res += spaceString + "},\n" + spaceString + "{\n";
                     ++dim;
                 } else {
@@ -161,13 +165,22 @@ std::string Aidge::Tensor::toString() const {
                     dimVals[dim]++;
                 }
             } else {
-                for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
+                for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]);
+                     ++dimVals[dim]) {
                     res += spaceString + "{";
                     for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
-                        res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
+                        res +=
+                            " " +
+                            ptrToString(mDataType, mImpl->hostPtr(mImplOffset),
+                                        counter++) +
+                            ",";
                     }
-                    res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
-                    if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
+                    res += " " +
+                           ptrToString(mDataType, mImpl->hostPtr(mImplOffset),
+                                       counter++) +
+                           "}";
+                    if (dimVals[dim] <
+                        static_cast<std::size_t>(dims()[dim] - 1)) {
                         res += ",";
                     }
                     res += "\n";
@@ -179,35 +192,45 @@ std::string Aidge::Tensor::toString() const {
                 dimVals[dim]++;
             }
         }
-
-        for(int i = static_cast<int>(dim); i > 0; --i) {
-            res += std::string((dim+1)<<1,' ') + "}\n";
+        if (nbDims() != 2) {  // If nbDims == 2, the closing braces are already in place
+            for (int i = static_cast<int>(dim); i >= 0; --i) {
+                res += std::string((i + 1) << 1, ' ') + "}\n";
+            }
         }
     } else {
         res += "{";
         for (DimSize_t j = 0; j < dims()[0]; ++j) {
-            res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
+            res += " " +
+                   ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) +
+                   ((j < dims()[0] - 1) ? "," : " ");
         }
     }
     res += "}";
     return res;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& fixedCoord) const {
+Aidge::Tensor Aidge::Tensor::extract(
+    const std::vector<std::size_t>& fixedCoord) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
-    AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
+    AIDGE_ASSERT(fixedCoord.size() <= mDims.size(),
+                 "Number of coordinates is higher than number of dimensions");
 
     Tensor subTensor(mDataType);
-    subTensor.resize(std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()),
-        std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(), mStrides.cend()));
+    subTensor.resize(
+        std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()),
+        std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(),
+                            mStrides.cend()));
     subTensor.setBackend(mImpl->backend(), mImpl->device().second);
     subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(fixedCoord));
     return subTensor;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& dims) const {
+Aidge::Tensor Aidge::Tensor::extract(
+    const std::vector<std::size_t>& startCoord,
+    const std::vector<std::size_t>& dims) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
-    AIDGE_ASSERT(startCoord.size() == mDims.size(), "Coordinates does not match number of dimensions");
+    AIDGE_ASSERT(startCoord.size() == mDims.size(),
+                 "Coordinates does not match number of dimensions");
 
     Tensor subTensor(mDataType);
     subTensor.resize(dims, mStrides);
@@ -224,7 +247,8 @@ void Aidge::Tensor::makeContiguous() {
     // Block so that mImpl ref count is 1 for resize()
     {
         // Create a new storage that will be contiguous
-        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
+        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create(
+            {mImpl->backend(), mDataType})(mImpl->device().second, mDims);
         // Copy elements from old to new storage
         std::size_t idx = 0;
         while (idx < mSize) {
@@ -233,13 +257,14 @@ void Aidge::Tensor::makeContiguous() {
             // Determine the size of the contiguous chunk
             std::size_t copySize = 1;
             while (idx + copySize < mSize &&
-                getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
-            {
+                   getStorageIdx(getCoord(idx + copySize)) ==
+                       storageIdx + copySize) {
                 ++copySize;
             }
 
             // Perform a single copy for the contiguous chunk
-            newImpl->copy(mImpl->rawPtr(mImplOffset + storageIdx), copySize, idx);
+            newImpl->copy(mImpl->rawPtr(mImplOffset + storageIdx), copySize,
+                          idx);
 
             // Move to the next index after the contiguous chunk
             idx += copySize;
@@ -267,8 +292,10 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
     }
     resize(src.dims());
 
-    AIDGE_ASSERT(src.getImpl()->device() == getImpl()->device(), "cannot copy-cast from a different backend/device");
-    getImpl()->copyCast(src.getImpl()->rawPtr(src.mImplOffset), src.dataType(), src.size(), mImplOffset);
+    AIDGE_ASSERT(src.getImpl()->device() == getImpl()->device(),
+                 "cannot copy-cast from a different backend/device");
+    getImpl()->copyCast(src.getImpl()->rawPtr(src.mImplOffset), src.dataType(),
+                        src.size(), mImplOffset);
 }
 
 void Aidge::Tensor::copyFrom(const Tensor& src) {
@@ -286,16 +313,20 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
     }
     resize(src.dims());
 
-    AIDGE_ASSERT(src.dataType() == dataType(), "cannot copy from a different data type");
-    getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, mImplOffset);
+    AIDGE_ASSERT(src.dataType() == dataType(),
+                 "cannot copy from a different data type");
+    getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset,
+                        mImplOffset);
 }
 
-void Aidge::Tensor::copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& movedSrcPtr) {
+void Aidge::Tensor::copyCastFrom(const Tensor& src,
+                                 std::shared_ptr<Tensor>& movedSrcPtr) {
     if (&src == this) {
         return;
     }
 
-    AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast from non-contiguous tensor");
+    AIDGE_ASSERT(src.isContiguous(),
+                 "cannot copy-cast from non-contiguous tensor");
 
     // Current Tensor has necessarily a data type, but may not have backend
     if (!getImpl()) {
@@ -308,29 +339,33 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& mov
     if (dataType() != src.dataType()) {
         // First move data to the target device (only if needed)
         const auto device = getImpl()->device();
-        const Tensor& movedSrc = src.refFrom(movedSrcPtr, device.first, device.second);
+        const Tensor& movedSrc =
+            src.refFrom(movedSrcPtr, device.first, device.second);
         // Second, copy-cast data (necessary)
-        getImpl()->copyCast(movedSrc.getImpl()->rawPtr(movedSrc.mImplOffset), movedSrc.dataType(), movedSrc.size(), mImplOffset);
-    }
-    else {
+        getImpl()->copyCast(movedSrc.getImpl()->rawPtr(movedSrc.mImplOffset),
+                            movedSrc.dataType(), movedSrc.size(), mImplOffset);
+    } else {
         // Directly copy, no conversion necessary
         // Avoid making a double copy if both data type and device are the same
-        getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, mImplOffset);
+        getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset,
+                            mImplOffset);
     }
 }
 
 Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
     // Scott Meyers' solution to avoid code duplication
-    return const_cast<Tensor&>(static_cast<const Tensor&>(*this).refContiguous(fallback));
+    return const_cast<Tensor&>(
+        static_cast<const Tensor&>(*this).refContiguous(fallback));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) const {
-    AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot refCast() it");
+const Aidge::Tensor& Aidge::Tensor::refContiguous(
+    std::shared_ptr<Tensor>& fallback) const {
+    AIDGE_ASSERT(getImpl(),
+                 "no backend was set for tensor, cannot refCast() it");
 
     if (isContiguous()) {
         return *this;
-    }
-    else {
+    } else {
         if (this != fallback.get()) {
             // Shallow copy to fallback
             *fallback = *this;
@@ -342,96 +377,117 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallb
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt) {
+Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                      const Aidge::DataType& dt) {
     // Scott Meyers' solution to avoid code duplication
-    return const_cast<Tensor&>(static_cast<const Tensor&>(*this).refCast(fallback, dt));
+    return const_cast<Tensor&>(
+        static_cast<const Tensor&>(*this).refCast(fallback, dt));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt) const {
-    AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot refCast() it");
+const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                            const Aidge::DataType& dt) const {
+    AIDGE_ASSERT(getImpl(),
+                 "no backend was set for tensor, cannot refCast() it");
 
     if (dt == dataType()) {
         return *this;
-    }
-    else {
+    } else {
         if (this == fallback.get()) {
             // if refFrom() was called before, just change the type
             fallback->setDataType(dt);
-        }
-        else {
-            AIDGE_ASSERT(isContiguous(), "cannot refCast non-contiguous tensor");
+        } else {
+            AIDGE_ASSERT(isContiguous(),
+                         "cannot refCast non-contiguous tensor");
 
             if (!fallback) {
                 fallback = std::make_shared<Tensor>(dt);
-            }
-            else {
-                fallback->setDataType(dt, false); // don't keep previous data (no copy)
+            } else {
+                fallback->setDataType(
+                    dt, false);  // don't keep previous data (no copy)
             }
 
             const auto device = getImpl()->device();
-            fallback->setBackend(device.first, device.second, false); // don't keep previous data (no copy)
+            fallback->setBackend(device.first, device.second,
+                                 false);  // don't keep previous data (no copy)
             fallback->resize(dims());
-            fallback->getImpl()->copyCast(getImpl()->rawPtr(mImplOffset), dataType(), size(), fallback->mImplOffset);
+            fallback->getImpl()->copyCast(getImpl()->rawPtr(mImplOffset),
+                                          dataType(), size(),
+                                          fallback->mImplOffset);
         }
         return *fallback;
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, DeviceIdx_t device) {
+Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+                                      const std::string& backend,
+                                      DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
-    return const_cast<Tensor&>(static_cast<const Tensor&>(*this).refFrom(fallback, backend, device));
+    return const_cast<Tensor&>(
+        static_cast<const Tensor&>(*this).refFrom(fallback, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, const std::string &backend, DeviceIdx_t device) const {
-    AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot refFrom() it");
+const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+                                            const std::string& backend,
+                                            DeviceIdx_t device) const {
+    AIDGE_ASSERT(getImpl(),
+                 "no backend was set for tensor, cannot refFrom() it");
 
     if (std::make_pair(backend, device) == getImpl()->device()) {
         return *this;
-    }
-    else {
+    } else {
         if (this == fallback.get()) {
             // if refCast() was called before, just change the backend
             fallback->setBackend(backend, device);
-        }
-        else {
-            AIDGE_ASSERT(isContiguous(), "cannot refFrom non-contiguous tensor");
+        } else {
+            AIDGE_ASSERT(isContiguous(),
+                         "cannot refFrom non-contiguous tensor");
 
             if (!fallback) {
                 fallback = std::make_shared<Tensor>(dataType());
-            }
-            else {
-                fallback->setDataType(dataType(), false); // don't keep previous data (no copy)
+            } else {
+                fallback->setDataType(
+                    dataType(), false);  // don't keep previous data (no copy)
             }
 
-            fallback->setBackend(backend, device, false); // don't keep previous data (no copy)
+            fallback->setBackend(backend, device,
+                                 false);  // don't keep previous data (no copy)
             fallback->resize(dims());
-            fallback->getImpl()->copyFrom(*getImpl(), size(), mImplOffset, fallback->mImplOffset);
+            fallback->getImpl()->copyFrom(*getImpl(), size(), mImplOffset,
+                                          fallback->mImplOffset);
         }
         return *fallback;
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device) {
+Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                  const Aidge::DataType& dt,
+                                  const std::string& backend,
+                                  DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
-    return const_cast<Tensor&>(static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device));
+    return const_cast<Tensor&>(
+        static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device) const {
+const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                        const Aidge::DataType& dt,
+                                        const std::string& backend,
+                                        DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it");
 
-    if (dt == dataType() && std::make_pair(backend, device) == getImpl()->device()) {
+    if (dt == dataType() &&
+        std::make_pair(backend, device) == getImpl()->device()) {
         return *this;
-    }
-    else {
+    } else {
         // Change fallback type, backend & device, without any data copy
         if (!fallback) {
             fallback = std::make_shared<Tensor>(dt);
-        }
-        else {
-            fallback->setDataType(dt, false); // don't keep previous data (no copy)
+        } else {
+            fallback->setDataType(dt,
+                                  false);  // don't keep previous data (no copy)
         }
 
-        fallback->setBackend(backend, device, false); // don't keep previous data (no copy)
+        fallback->setBackend(backend, device,
+                             false);  // don't keep previous data (no copy)
         fallback->resize(dims());
         return *fallback;
     }
@@ -439,7 +495,7 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const
 
 std::set<std::string> Aidge::Tensor::getAvailableBackends() {
     std::set<std::string> backendsList;
-    for(const auto& tupleKey : Registrar<Tensor>::getKeys())
+    for (const auto& tupleKey : Registrar<Tensor>::getKeys())
         backendsList.insert(std::get<0>(tupleKey));
     return backendsList;
 }
diff --git a/src/filler/Filler.cpp b/src/filler/Filler.cpp
index 34e04c2ba84ad493429bceadd54f4fa27df69bcd..f5839087c2e37c5e0288f08716595a0ed66e869e 100644
--- a/src/filler/Filler.cpp
+++ b/src/filler/Filler.cpp
@@ -20,12 +20,12 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-
 void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor,
                                  std::uint32_t& fanIn, std::uint32_t& fanOut) {
-    AIDGE_ASSERT(
-        tensor->nbDims() == 4,
-        "Tensor need to have 4 dimensions to compute FanIn and FanOut.");
+    AIDGE_ASSERT(tensor->nbDims() == 4 || tensor->nbDims() == 2,
+                 "Tensor needs to have 4 or 2 dimensions to compute FanIn and "
+                 "FanOut, but found a tensor with {} dims.",
+                 tensor->nbDims());
     // Warning: This function suppose NCXX data layout.
     // Aidge currently only support NCHW but this maybe not be true in the
     // future.
@@ -35,6 +35,6 @@ void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor,
                  "Cannot calculate FanIn if tensor batch size is 0.");
     AIDGE_ASSERT(channelSize != 0,
                  "Cannot calculate FanOut if tensor channel size is 0.");
-    fanIn =  static_cast<std::uint32_t>(tensor->size() / batchSize);
+    fanIn = static_cast<std::uint32_t>(tensor->size() / batchSize);
     fanOut = static_cast<std::uint32_t>(tensor->size() / channelSize);
 }
diff --git a/src/filler/HeFiller.cpp b/src/filler/HeFiller.cpp
index 74d681f1a05c15045d27a0fe678aa676d16af077..ff20b76183c03e7ac90b5c225b3da7a8c6ffb2df 100644
--- a/src/filler/HeFiller.cpp
+++ b/src/filler/HeFiller.cpp
@@ -29,7 +29,9 @@ void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor,
               : (varianceNorm == Aidge::VarianceNorm::Average)
                   ? (fanIn + fanOut) / 2.0
                   : fanOut);
-
+    AIDGE_ASSERT(n > 0,
+                 "Invalid fan-in/fan-out value: this would lead to a division "
+                 "by zero or the square root of a negative value.");
     const T stdDev(std::sqrt(2.0 / n));
 
     const T mean(varianceNorm == Aidge::VarianceNorm::FanIn ? meanNorm / fanIn
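A standalone worked example of the fan computation guarded by the assertion above, assuming a hypothetical NCHW convolution weight tensor of dims {64, 128, 3, 3}; the He standard deviation then follows as sqrt(2/n) with n = fanIn under VarianceNorm::FanIn (plain C++, no Aidge types involved):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical NCHW weight tensor: N=64, C=128, H=3, W=3
    const std::uint32_t dims[4] = {64, 128, 3, 3};
    const std::uint32_t size   = dims[0] * dims[1] * dims[2] * dims[3];  // 73728
    const std::uint32_t fanIn  = size / dims[0];   // size / batchSize   = 1152
    const std::uint32_t fanOut = size / dims[1];   // size / channelSize = 576
    // He initialization with VarianceNorm::FanIn: stdDev = sqrt(2 / n), n = fanIn
    const double stdDev = std::sqrt(2.0 / static_cast<double>(fanIn));
    std::printf("fanIn=%u fanOut=%u stdDev=%.5f\n", fanIn, fanOut, stdDev);
    return 0;
}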
diff --git a/src/filler/XavierFiller.cpp b/src/filler/XavierFiller.cpp
index a1de15971ca8063e504e270fa6d2275d93270460..734874d449c83087ca0e93df7eeb620e178ee7ba 100644
--- a/src/filler/XavierFiller.cpp
+++ b/src/filler/XavierFiller.cpp
@@ -29,6 +29,9 @@ void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor,
               : (varianceNorm == Aidge::VarianceNorm::Average)
                   ? (fanIn + fanOut) / 2.0
                   : fanOut);
+    AIDGE_ASSERT(n > 0,
+                 "Invalid fan-in/fan-out value: this would lead to a division "
+                 "by zero or the square root of a negative value.");
     const T scale(std::sqrt(3.0 / n));
 
     std::uniform_real_distribution<T> uniformDist(-scale, scale);
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index dcd7a06ef8560ad6d4a572cd823e2f9dc357b73c..0bc918995c55a914b29987506578491e2c86fae5 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -391,7 +391,7 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
     forwardDims(dims);
 }
 
-void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
+bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
@@ -406,22 +406,18 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
     // Ensure every node in the graph is correctly connected
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
         for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
-            // assess if the input was not already set and is a Tensor then link it to parent output
             std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
             if (inputI.first) {
-                if ( std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i)) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
-                    if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
-                        // assert provided Data is of "Tensor" type
-                        nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second));
-                    }
-                    else {
-                        AIDGE_ASSERT(false, "Non-tensor entries not handled yet, for node {} (of type {}).", nodePtr->name(), nodePtr->type());
-                    }
-                }
+                // Check that associated Data are properly connected...
+                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) == inputI.first->getOperator()->getRawOutput(inputI.second),
+                  "Input#{} for node {} ({}) is not properly connected to output#{} of node {} ({}): Data or Tensor mismatch!",
+                    i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
             } else {
-                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i)
-                    && !std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
+                // Input is missing
+                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
+                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
+                  "Empty input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
             }
 
         }
@@ -436,8 +432,8 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
               const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
               // Recompute everytime, even if it was already computed in a
               // previous call of forwardDims(), as the graph may have changed!
-              op->computeOutputDims();
-              if (!op->outputDimsForwarded()) {
+              op->forwardDims(allowDataDependency);
+              if (!op->dimsForwarded()) {
                   nextList.insert(nodePtr);
               }
             }
@@ -450,12 +446,16 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
             std::transform(nextList.begin(), nextList.end(),
                 std::back_inserter(nodesName),
                 [](auto val){ return val->name() + " (" + val->type() + ")"; });
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unable to forward dimensions (circular dependency and/or wrong dimensions?). Unable to compute output dims for nodes {}.", nodesName);
+
+            Log::warn("Unable to forward dimensions (circular dependency, wrong dimensions and/or data-dependent dimensions?). Unable to compute output dims for nodes {}.", nodesName);
+            return false;
         }
 
         listNodes.swap(nextList);
     }
     while (!listNodes.empty());
+
+    return listNodes.empty();
 }
 
 void Aidge::GraphView::setBackend(const std::string &backend, const DeviceIdx_t device) const {
@@ -908,7 +908,7 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
                                                      newGraph->getOrderedOutputs();
 
     auto inputParents = std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(oldOIn.size());
-    auto outputChildren = std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(oldOOut.size());
+    auto outputChildren = std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>>(oldOOut.size());
 
     // keep in memory every node related to the node to replace :
     // Parent
@@ -919,19 +919,12 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
         // inputParent.first -> addChild(newOI[i].first, inputParent.second, newOI[i].second);
     }
     // Children
-    for (std::size_t i = 0; i < oldOOut.size();) {
+    for (std::size_t i = 0; i < oldOOut.size(); ++i) {
         std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> outputChild =
               oldOOut[i].first -> output(oldOOut[i].second);
-        if (outputChild.empty()) {
-            outputChildren[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>({nullptr, gk_IODefaultIndex});
-            ++i;
-        }
-        else {
-            for (const auto& child : outputChild) {
-                if (oldNodes.find(child.first) == oldNodes.cend()) {
-                    outputChildren[i] = child;
-                    ++i;
-                }
+        for (const auto& child : outputChild) {
+            if (oldNodes.find(child.first) == oldNodes.cend()) {
+                outputChildren[i].push_back(child);
             }
         }
     }
@@ -969,8 +962,8 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
             }
         }
         for (std::size_t o = 0; o < oldOOut.size(); ++o) {
-            if (outputChildren[o].first) {
-                newOOut[o].first -> addChild(outputChildren[o].first, newOOut[o].second, outputChildren[o].second);
+            for (const auto& child : outputChildren[o]) {
+                newOOut[o].first -> addChild(child.first, newOOut[o].second, child.second);
             }
         }
     }
@@ -980,15 +973,21 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
         if (newNodes.size() == 0) {
             // Case 3
             if (oldOIn.size() == oldOOut.size()) {
+                // Same number of inputs and outputs: connect each input to the corresponding output
                 for (std::size_t i = 0; i < oldOIn.size(); ++i) {
                     if (inputParents[i].first) {
-                      inputParents[i].first -> addChild(outputChildren[i].first, inputParents[i].second, outputChildren[i].second);
+                      for (const auto& child : outputChildren[i]) {
+                        inputParents[i].first -> addChild(child.first, inputParents[i].second, child.second);
+                      }
                     }
                 }
             }
             else if ((oldOIn.size() == 1) && (inputParents[0].first)) {
-                for (std::size_t i = 0; i < oldOIn.size(); ++i) {
-                    inputParents[0].first -> addChild(outputChildren[i].first, inputParents[0].second, outputChildren[i].second);
+                // Single input: connect the only input to all the outputs
+                for (std::size_t i = 0; i < oldOOut.size(); ++i) {
+                    for (const auto& child : outputChildren[i]) {
+                        inputParents[0].first -> addChild(child.first, inputParents[0].second, child.second);
+                    }
                 }
             }
         }
@@ -1009,8 +1008,8 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
                 }
             }
             for (std::size_t o = 0; o < oldOOut.size(); ++o) {
-                if (outputChildren[o].first) {
-                    newOOut[o].first -> addChild(outputChildren[o].first, newOOut[o].second, outputChildren[o].second);
+                for (const auto& child : outputChildren[o]) {
+                    newOOut[o].first -> addChild(child.first, newOOut[o].second, child.second);
                 }
             }
         }
@@ -1059,6 +1058,12 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
     return true;
 }
 
+void Aidge::GraphView::updateInputsOutputs() {
+  for (auto node : mNodes) {
+    updateInputsOutputsNew(node);
+  }
+}
+
 void Aidge::GraphView::updateInputsOutputsNew(std::shared_ptr<Node> newNode) {
   // Can be called several times with the same node, e.g. when addChild() is
   // called on a node already part of the GraphView. In this case, inputs/outputs
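A minimal caller-side sketch of the new forwardDims() contract introduced above (failure is now reported through the bool return value instead of an abort); the helper name below is hypothetical and only the GraphView API shown in this diff is assumed:

#include <memory>
#include <vector>
#include "aidge/graph/GraphView.hpp"

// Try a purely static shape-inference pass first, then retry while allowing
// operators with data-dependent output dims to read their input data.
bool tryForwardDims(const std::shared_ptr<Aidge::GraphView>& graph,
                    const std::vector<std::vector<Aidge::DimSize_t>>& inputDims) {
    if (graph->forwardDims(inputDims, /*allowDataDependency=*/false)) {
        return true;
    }
    return graph->forwardDims(inputDims, /*allowDataDependency=*/true);
}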
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 149691f796d1d84212e9d7842a28e4cb79469e6a..b08bb4c2056e8c14f5b1dd3aae62fbacf8d8c14e 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -57,7 +57,10 @@ Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
 //        INNER
 ///////////////////////////////////////////////////////
 
-void Aidge::Node::setName(const std::string& name) { mName = name; }
+void Aidge::Node::setName(const std::string& name) {
+    for (auto graphView : views()) graphView->updateNodeName(mName, name);
+    mName = name;
+}
 
 ///////////////////////////////////////////////////////
 //        OPERATORS
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 85bc4b7aef53e8064a8f31815a42689013880812..9b77ffcbe0117292ed0aa520309febf709e8dd68 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -32,7 +32,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
     }
 }
 
-void Aidge::Add_Op::computeOutputDims() {
+bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     bool associated = (nbInputs() > 0); // do not compute anything if no input
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -63,13 +63,16 @@ void Aidge::Add_Op::computeOutputDims() {
                         *it = dim;
                     }
                     else if ((dim != *it) && (dim != 1)) {
-                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsopported Tensor shape for Add operation");
+                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Add Operation: {} for previous inputs vs {} for input#{}",
+                            outDims, getInput(i)->dims(), i);
                     }
                 }
             }
         }
         mOutputs[0]->resize(outDims);
     }
+
+    return associated;
 }
 
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index acb097668bce0ff6f335f577faed503e086db79f..07123bc88aa1da22bfa98166d6a01af8d66be98d 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -36,7 +36,7 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): Operator
 }
 
 template <Aidge::DimIdx_t DIM>
-void Aidge::AvgPooling_Op<DIM>::computeOutputDims() {
+bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
@@ -54,7 +54,9 @@ void Aidge::AvgPooling_Op<DIM>::computeOutputDims() {
                                         static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
         }
         getOutput(0)->resize(outputDims);
+        return true;
     }
+    return false;
 }
 
 
@@ -69,7 +71,7 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
     if (firstEltDims.size() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
     }
-    if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
         // Offset
         std::vector<DimSize_t> inputIdxDims = firstEltDims;
 
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index b14f0238809b9ec9b6b186d093ecf3b1554865cb..14bf65763c024ffe28d30654a49c9630737a12fd 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -36,7 +36,7 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTen
 }
 
 template <Aidge::DimIdx_t DIM>
-void Aidge::BatchNorm_Op<DIM>::computeOutputDims() {
+bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     bool associated = true;
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -53,6 +53,7 @@ void Aidge::BatchNorm_Op<DIM>::computeOutputDims() {
         }
         mOutputs[0]->resize(getInput(0)->dims());
     }
+    return associated;
 }
 
 template <Aidge::DimIdx_t DIM>
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 4f1ac55898b11668ba1c2f5299f8e1ca1d4e5df1..f1c8e25e17c80d58d444a1ddddbaa428b2fc4c41 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -20,22 +20,19 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Cast_Op::Type = "Cast";
-
-void Aidge::Cast_Op::forward() {
-    if (mImpl) {
-        mImpl->forward();
-    }
-    else {
-        mOutputs[0]->copyCast(*(mInputs[0]));
-    }
-
-    runHooks();
+void Aidge::Cast_OpImpl::forward() {
+    const Cast_Op& op = dynamic_cast<const Cast_Op&>(mOp);
+    op.getOutput(0)->copyCast(*(op.getInput(0)));
 }
 
+const std::string Aidge::Cast_Op::Type = "Cast";
+
 void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Cast_Op>::exists({name})) {
         SET_IMPL_MACRO(Cast_Op, *this, name);
     }
+    else {
+        mImpl = std::make_shared<Cast_OpImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 7df5b6dbf6122da44aed280da0d717232ba42fef..ee06ce69b135e11fe3ed5be8fa9f501debb6acd5 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -18,9 +18,48 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+void Aidge::Concat_OpImpl::forward() {
+    const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
+    const DimSize_t axis = op.template getAttr<DimSize_t>("Axis");
+
+    assert(op.getInput(0) && "missing input in Concat operator");
+    DataType datatypeFirstInput = op.getInput(0)->dataType();
+    for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
+        assert(op.getInput(i) && "missing input in Concat operator");
+        assert(op.getInput(i)->dataType() == datatypeFirstInput);
+    }
+
+    DimSize_t outputAxisValue = 0;
+    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
+        outputAxisValue += op.getInput(i)->dims()[axis];
+    }
+
+    DimSize_t prodDimLower = 1;
+    for (DimIdx_t i = 0; i < axis; ++i) {
+        prodDimLower *= op.getInput(0)->dims()[i];
+    }
+    DimSize_t prodDimHigher = 1;
+    for (DimIdx_t i = axis + 1; static_cast<std::size_t>(i) < op.getInput(0)->dims().size();
+         ++i) {
+        prodDimHigher *= op.getInput(0)->dims()[i];
+    }
+
+    std::size_t oIndexStart = 0;
+    // std::size_t oIndex = 0;
+    for (std::size_t inputId = 0; inputId < op.nbInputs(); ++inputId) {
+        // oIndex = oIndexStart;
+        const DimSize_t iOffset = prodDimHigher*op.getInput(inputId)->dims()[axis];
+        for (std::size_t iIndex = 0, oIndex = oIndexStart; iIndex < prodDimLower; ++iIndex) {
+            op.getOutput(0)->getImpl()->copy(op.getInput(inputId)->getImpl()->rawPtr(iIndex*iOffset), iOffset, oIndex);
+            oIndex += prodDimHigher*outputAxisValue;
+        }
+        oIndexStart += op.getInput(inputId)->dims()[axis]*prodDimHigher;
+    }
+}
+
 const std::string Aidge::Concat_Op::Type = "Concat";
 
-void Aidge::Concat_Op::computeOutputDims() {
+bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     // Every input is non-empty with the same number of dimensions
     bool associated = (getInput(0) != nullptr);
     associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
@@ -49,9 +88,16 @@ void Aidge::Concat_Op::computeOutputDims() {
     if (associated) {
         getOutput(0)->resize(outputDims);
     }
+
+    return associated;
 }
 
 void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Concat_Op, *this, name);
+    if (Registrar<Concat_Op>::exists({name})) {
+        SET_IMPL_MACRO(Concat_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Concat_OpImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
 }
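The index arithmetic of Concat_OpImpl::forward() above can be checked on plain buffers; the following standalone sketch (no Aidge types) concatenates row-major data of dims {2, 3} and {2, 2} along axis 1 using the same prodDimLower/prodDimHigher offsets:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
    const std::vector<int> in0 = {0, 1, 2, 3, 4, 5};   // dims {2, 3}
    const std::vector<int> in1 = {10, 11, 12, 13};     // dims {2, 2}
    // Concatenation along axis 1: prodDimLower = 2 (product of dims before the
    // axis), prodDimHigher = 1 (product of dims after), output axis = 3 + 2 = 5.
    const std::size_t prodDimLower = 2, prodDimHigher = 1, outputAxisValue = 5;
    std::vector<int> out(prodDimLower * outputAxisValue * prodDimHigher);

    std::size_t oIndexStart = 0;
    const std::vector<std::pair<const std::vector<int>*, std::size_t>> inputs =
        {{&in0, 3}, {&in1, 2}};                        // (buffer, dims[axis])
    for (const auto& input : inputs) {
        const std::size_t iOffset = prodDimHigher * input.second;
        for (std::size_t iIndex = 0, oIndex = oIndexStart; iIndex < prodDimLower; ++iIndex) {
            std::copy_n(input.first->begin() + iIndex * iOffset, iOffset,
                        out.begin() + oIndex);
            oIndex += prodDimHigher * outputAxisValue;
        }
        oIndexStart += input.second * prodDimHigher;
    }
    for (int v : out) std::printf("%d ", v);           // 0 1 2 10 11 3 4 5 12 13
    std::printf("\n");
    return 0;
}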
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 5ffe5f08dbcbfe42c406846990c432a7fbd325e0..e6300d08c2c792c8a3eb66b307aca53f9d2acc73 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -22,7 +22,7 @@
 
 const std::string Aidge::Div_Op::Type = "Div";
 
-void Aidge::Div_Op::computeOutputDims() {
+bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -44,13 +44,17 @@ void Aidge::Div_Op::computeOutputDims() {
                 outDims[out_id] = lowDims[low_id];
             }
             else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsopported Tensor shape for Div Operation");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Div Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
             }
             --out_id;
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 9865d64f6a0b87be96244bc4b39c91b605f02b6f..ba7e29e7b6543a570ceede6158bd306286037c10 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -36,7 +36,7 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh
         mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
 }
 
-void Aidge::FC_Op::computeOutputDims() {
+bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
     bool associated = true;
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
         if (!getInput(i)) {
@@ -48,6 +48,8 @@ void Aidge::FC_Op::computeOutputDims() {
         // <batch, OutChannels>
         mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
     }
+
+    return associated;
 }
 
 void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 259e6513994970eb7e677f44c981888388825fae..7b0945271660be8f309024f46c258e6a7e2193e5 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -20,10 +20,39 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+void Aidge::Gather_OpImpl::forward() {
+    const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
+    const auto axis = op.template getAttr<std::int64_t>("Axis");
+
+    const std::size_t axisIdx = axis>=0 ?
+                                axis :
+                                static_cast<std::size_t>(axis) + op.getInput(0)->dims().size();
+
+    std::size_t postAxisElems = 1;
+    for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
+        postAxisElems *= op.getInput(0)->dims()[i];
+    }
+    std::size_t preAxisElems = 1;
+    for (std::size_t i = 0; i < axisIdx; ++i) {
+        preAxisElems *= op.getInput(0)->dims()[i];
+    }
+
+    const auto indices = op.template getAttr<std::vector<std::int64_t>>("Indices");
+    std::size_t outputOffset = 0;
+    for (std::size_t i=0; i<preAxisElems; ++i)
+    {
+        for(std::size_t j=0; j<indices.size(); ++j)
+        {
+            const std::size_t idx = indices[j] >= 0 ? indices[j] : static_cast<std::size_t>(indices[j]) + op.getInput(0)->dims()[axisIdx];
+            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
+            outputOffset += postAxisElems;
+        }
+    }
+}
 
 const std::string Aidge::Gather_Op::Type = "Gather";
 
-void Aidge::Gather_Op::computeOutputDims() {
+bool Aidge::Gather_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
@@ -46,10 +75,18 @@ void Aidge::Gather_Op::computeOutputDims() {
         }
 
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Gather_Op, *this, name);
+    if (Registrar<Gather_Op>::exists({name})) {
+        SET_IMPL_MACRO(Gather_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Gather_OpImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index 3eae49b69ce639529d49dd1c0d241f12ece5d98b..fdf3036fe7eeccb2dfd9e21faf834e27854e45f3 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -25,8 +25,8 @@ const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Inpu
     return [nbOutputs, inputIdx](const std::vector<std::vector<std::size_t>>& inputsDims) { return std::vector<std::vector<std::size_t>>(nbOutputs, inputsDims[inputIdx]); };
 }
 
-void Aidge::GenericOperator_Op::computeOutputDims() {
-    if (mComputeOutputDims) {
+bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (mForwardDims) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>());
         for (std::size_t i = 0; i < nbInputs(); ++i) {
             if (getInput(i)) {
@@ -34,23 +34,25 @@ void Aidge::GenericOperator_Op::computeOutputDims() {
             }
         }
 
-        const auto& outputsDims = mComputeOutputDims(inputsDims);
+        const auto& outputsDims = mForwardDims(inputsDims);
         AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The provided ComputeDimsFunc function returns the wrong number of outputs");
         for (std::size_t i = 0; i < nbOutputs(); ++i) {
             mOutputs[i]->resize(outputsDims[i]);
         }
+        return true;
     }
     else {
-        AIDGE_ASSERT(false, "Cannot compute output dim of a GenericOperator");
+        Log::warn("GenericOperator: cannot compute output dims, no ComputeDimsFunc function provided.");
+        return false;
     }
 }
 
-bool Aidge::GenericOperator_Op::outputDimsForwarded() const {
-    if (mComputeOutputDims) {
+bool Aidge::GenericOperator_Op::dimsForwarded() const {
+    if (mForwardDims) {
         return !(mOutputs[0]->empty());
     }
     else {
-        AIDGE_ASSERT(false, "GenericOperator cannot forward dims");
+        Log::notice("GenericOperator: output dims not forwarded, no ComputeDimsFunc function provided.");
         return false;
     }
-}
\ No newline at end of file
+}
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index 618ccc06f40da4b1f1c491487fd978da768652e4..b09426f8f835eda5600b630488ef18c5b08ba32a 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -21,18 +21,13 @@
 
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
-void Aidge::GlobalAveragePooling_Op::computeOutputDims() {
+bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
   // error checking
   if (!getInput(0)) {
     AIDGE_THROW_OR_ABORT(std::runtime_error,
                          "GlobalAveragePooling : The input was not connected");
   }
-  // necessary bc forward dims sometimes passes with an empty vector before
-  // doing another pass
-  else if (getInput(0)->empty()) {
-    return;
-  // computation
-  } else {
+  else if (!getInput(0)->empty()) {
     AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
                  "GlobalAveragePooling :  needs at least a 3 dimensions input, "
                  "number of input dim : {}",
@@ -43,7 +38,10 @@ void Aidge::GlobalAveragePooling_Op::computeOutputDims() {
     const std::vector<DimSize_t> out_dims{getInput(0)->dims().at(0),
                                           getInput(0)->dims().at(1)};
     mOutputs[0]->resize(out_dims);
+    return true;
   }
+
+  return false;
 }
 
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index f57906dd4f3564b52cde16236bda87370e8f86d7..2b8107bfc77ef70b33a97032d350a42ec5f3f466 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -13,4 +13,10 @@
 
 #include "aidge/operator/Identity.hpp"
 
-const std::string Aidge::Identity_Op::Type = "Identity";
\ No newline at end of file
+const std::string Aidge::Identity_Op::Type = "Identity";
+
+void Aidge::Identity_Op::forward() {
+    // Perform a shallow copy
+    *(mOutputs[0]) = *(mInputs[0]);
+    runHooks();
+}
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 56899875338d487294163aa018e0d98b5f7a5269..8f7548155cde4c7187f7a7fe96a44c4accd2c302 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -20,13 +20,14 @@
 
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
-void Aidge::MatMul_Op::computeOutputDims() {
+bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
     }
     if (getInput(0)->empty() && getInput(1)->empty()) {
         // both inputs are scalar
         mOutputs[0]->resize({});
+        return true;
     }
     else if (!getInput(0)->empty() && !getInput(1)->empty())
     {
@@ -69,7 +70,10 @@ void Aidge::MatMul_Op::computeOutputDims() {
             outDims.push_back(dims1[dims_size-1]);
 
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 6e54a234d2fc78c8e8e9a43a7528709c8e51adc4..e08b5f1054f07a9dcc1722d219ebce022f994d61 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -20,9 +20,74 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
+    Aidge::IOIndex_t inputIdx) const
+{
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
+
+    if (scheduleStep == 0 && inputIdx == 0) {
+        // No data input is required for the initial step.
+        // Initialization data is required however.
+        return Elts_t::NoneElts();
+    }
+    else if (scheduleStep > 0 && inputIdx == 1) {
+        // No initialization data is required after the initial step.
+        return Elts_t::NoneElts();
+    }
+    else {
+        return OperatorImpl::getNbRequiredData(inputIdx);
+    }
+}
+
+Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
+    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
+
+    if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) {
+        return Elts_t::NoneElts();
+    }
+    else {
+        return Elts_t::DataElts(op.getOutput(outputIdx)->size());
+    }
+}
+
+void Aidge::Memorize_OpImpl::updateConsummerProducer() {
+    OperatorImpl::updateConsummerProducer();
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
+    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
+    AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded");
+}
+
+void Aidge::Memorize_OpImpl::forward() {
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>();
+    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
+    AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded");
+
+    if (forwardStep == 0) {
+        op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
+    }
+    else {
+        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+    }
+}
+
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
-void Aidge::Memorize_Op::computeOutputDims() {
+void Aidge::Memorize_Op::updateConsummerProducer() {
+    Operator::updateConsummerProducer();
+    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
+    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+}
+
+bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
     for (size_t i = 0; i < 2; ++i) {
         if (!getInput(i)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
@@ -34,19 +99,18 @@ void Aidge::Memorize_Op::computeOutputDims() {
     if (!(getInput(0)->empty())) {
         const auto expectedDims =  getInput(0)->dims();
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
     else if (!(getInput(1)->empty())) {
         const auto expectedDims =  getInput(1)->dims();
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
-}
 
-void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Memorize_Op>::create({name})(*this);
-    mOutputs[0]->setBackend(name, device);
+    return false;
 }
 
-bool Aidge::Memorize_Op::outputDimsForwarded() const {
+bool Aidge::Memorize_Op::dimsForwarded() const {
     // Only check the output dims
     bool forwarded = true;
     // check outputs have been filled
@@ -56,10 +120,14 @@ bool Aidge::Memorize_Op::outputDimsForwarded() const {
     return forwarded;
 }
 
-void Aidge::Memorize_Op::updateConsummerProducer() {
-    Operator::updateConsummerProducer();
-    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
-    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Memorize_Op>::exists({name})){
+        SET_IMPL_MACRO(Memorize_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Memorize_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
 }
 
 void Aidge::Memorize_Op::forward() {
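The scheduling rule encoded by Memorize_OpImpl::getNbRequiredData() above can be summarised in a standalone predicate (a sketch over the same ScheduleStep semantics, no Aidge types):

#include <cstdio>

// Which input does a Memorize node consume at a given schedule step?
// step == 0: only the initialization input (#1); step > 0: only the
// recurrent data input (#0).
bool consumesInput(unsigned int scheduleStep, unsigned int inputIdx) {
    if (scheduleStep == 0) return inputIdx == 1;
    return inputIdx == 0;
}

int main() {
    std::printf("%d %d %d %d\n",
                consumesInput(0, 0), consumesInput(0, 1),
                consumesInput(1, 0), consumesInput(1, 1));  // 0 1 1 0
    return 0;
}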
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 46e9e1173af98ed5711aa0bbce54705fb61dc03c..36ff1854703d015980a1943390eb87d0863d877f 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -37,6 +37,37 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
     }
 }
 
+void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
+    AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
+    AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bounds for MetaOperator", inputIdx);
+
+    const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    inputOp.first->getOperator()->associateInput(inputOp.second, data);
+
+    // Associate inputs for custom implementation
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
+}
+
+void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
+    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
+
+    const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    inputOp.first->getOperator()->setInput(inputOp.second, data);
+
+    // Associate inputs for custom implementation
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
+}
+
+void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Data>&& data) {
+    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
+
+    const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    inputOp.first->getOperator()->setInput(inputOp.second, std::forward<std::shared_ptr<Data>>(data));
+
+    // Associate inputs for custom implementation
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
+}
+
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
         return mImpl->getNbRequiredData(inputIdx);
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index d8776e32fca909663bafe3fae3ebf9f5616c69c9..0f635ea655676e488343bb55d9de6423a997af7d 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -12,15 +12,19 @@
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Move.hpp"
 
+void Aidge::Move_OpImpl::forward() {
+    const Move_Op& op = dynamic_cast<const Move_Op&>(mOp);
+    op.getOutput(0)->copyFrom(*(op.getInput(0)));
+}
+
 const std::string Aidge::Move_Op::Type = "Move";
 
-void Aidge::Move_Op::forward() {
-    if (mImpl) {
-        mImpl->forward();
+void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Move_Op>::exists({mInputs[0]->getImpl()->backend(), name})) {
+        SET_IMPL_MACRO(Move_Op, *this, {mInputs[0]->getImpl()->backend(), name});
     }
     else {
-        mOutputs[0]->copyFrom(*(mInputs[0]));
+        mImpl = std::make_shared<Move_OpImpl>(*this);
     }
-
-    runHooks();
+    mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 89bef9e0edcf6731dfbaf9ebf48ebddf5b71e815..426de388f31391fb5e59446d50e50de94ca5f8a1 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -23,7 +23,7 @@
 
 const std::string Aidge::Mul_Op::Type = "Mul";
 
-void Aidge::Mul_Op::computeOutputDims() {
+bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -45,16 +45,17 @@ void Aidge::Mul_Op::computeOutputDims() {
                 outDims[out_id] = lowDims[low_id];
             }
             else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsopported Tensor shape for Div Operation");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Mul Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
             }
             --out_id;
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
-    else if (!getInput(0)->empty() && !getInput(1)->empty()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims());
-    }
+
+    return false;
 }
 
 void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index b85c18040ad84a1e9b1ea1f8b475c32260b6587a..2a60f580f3279170a0f1ff417cea96ae7cfa981f 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -119,7 +119,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     if (nbInputs() != nbData()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
     }
-    if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
+    if (!dimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }
     for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
@@ -131,7 +131,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
 }
 
-void Aidge::OperatorTensor::computeOutputDims() {
+bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     bool associated = (nbInputs() > 0); // do not compute anything if no input
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -151,9 +151,11 @@ void Aidge::OperatorTensor::computeOutputDims() {
         }
         mOutputs[0]->resize(expectedDims);
     }
+
+    return associated;
 }
 
-bool Aidge::OperatorTensor::outputDimsForwarded() const {
+bool Aidge::OperatorTensor::dimsForwarded() const {
     bool forwarded = true;
     // check both inputs and outputs have been filled
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -176,4 +178,12 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
         AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
         getInput(i)->setDataType(dataType);
     }
-}
\ No newline at end of file
+}
+
+void Aidge::OperatorTensor::forward() {
+    if (!dimsForwarded()) {
+        forwardDims();
+    }
+
+    Operator::forward();
+}
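A small caller-side sketch of the behaviour added above: since OperatorTensor::forward() now checks dimsForwarded() and triggers forwardDims() on demand, a single operator can be run without an explicit prior shape-inference call (the helper name is hypothetical; only the API shown in this diff is assumed):

#include <memory>
#include "aidge/operator/OperatorTensor.hpp"

void runSingleOp(const std::shared_ptr<Aidge::OperatorTensor>& op) {
    // No explicit op->forwardDims() call is needed any more: forward() computes
    // the output dims itself when they have not been forwarded yet.
    op->forward();
}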
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 06999e301ce0968b2d9979e47f412c02e59de3ad..18325d80a94f35878ededca839ec809000527c39 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -20,10 +20,24 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+    return Elts_t::DataElts(op.getInput(inputIdx)->size()
+        / op.getInput(inputIdx)->dims()[0]);
+}
+
+void Aidge::Pop_OpImpl::forward() {
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+    assert(op.getInput(0) && "missing input #0");
+    const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>();
+    *op.getOutput(0) = op.getInput(0)->extract({forwardStep});
+}
 
 const std::string Aidge::Pop_Op::Type = "Pop";
 
-void Aidge::Pop_Op::computeOutputDims() {
+bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
@@ -32,7 +46,10 @@ void Aidge::Pop_Op::computeOutputDims() {
         auto inputDims = getInput(0)->dims();
         inputDims.erase(inputDims.begin());
         getOutput(0)->resize(inputDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Pop_Op::updateConsummerProducer() {
@@ -40,12 +57,17 @@ void Aidge::Pop_Op::updateConsummerProducer() {
     this->template getAttr<PopAttr::ForwardStep>() = 0;
 }
 
+void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Pop_Op>::exists({name})){
+        SET_IMPL_MACRO(Pop_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Pop_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
 void Aidge::Pop_Op::forward() {
     Operator::forward();
     ++this->template getAttr<PopAttr::ForwardStep>();
 }
-
-void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Pop_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 72a04de04fda8a432309de8b4a69b1dfb6af1370..135c792345b0caf1166e671a8dad7d5b49b42ee7 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -22,7 +22,7 @@
 
 const std::string Aidge::Pow_Op::Type = "Pow";
 
-void Aidge::Pow_Op::computeOutputDims() {
+bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -44,13 +44,17 @@ void Aidge::Pow_Op::computeOutputDims() {
                 outDims[out_id] = lowDims[low_id];
             }
             else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsopported Tensor shape for Div Operation");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Pow Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
             }
             --out_id;
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 38bbbc14846f8f4356602b1d3a66058439bb37d0..7059ea7e989d789b4cff0ed895fc2c5ec0ad81bc 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -32,28 +32,12 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
       Attributes_(attr<ProdAttr::Constant>(constant))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
-#ifdef PYBIND
-    if(Py_IsInitialized()) {
-        auto obj = py::cast(&(*this));
-        setImpl((mOutputs[0]->hasImpl()) ?
-            (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-            std::make_shared<OperatorImpl>(*this, ""));
-    } else {
-        setImpl((mOutputs[0]->hasImpl()) ?
-            (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-            std::make_shared<OperatorImpl>(*this, ""));
+    if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
+        SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
+    }
+    else {
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
-#else
-    setImpl((mOutputs[0]->hasImpl()) ?
-                (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                    Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                    std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-                std::make_shared<OperatorImpl>(*this, ""));
-#endif
 }
 
 /**
@@ -66,57 +50,28 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
       Attributes_(op)
 {
     mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
-#ifdef PYBIND
-    if(Py_IsInitialized()) {
-            auto obj = py::cast(&(*this));
-            setImpl((mOutputs[0]->hasImpl()) ?
-                (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                    Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                    std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-                std::make_shared<OperatorImpl>(*this, ""));
-        } else {
-            setImpl((mOutputs[0]->hasImpl()) ?
-                (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                    Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                    std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-                std::make_shared<OperatorImpl>(*this, ""));
-        }
-#else
-    setImpl((mOutputs[0]->hasImpl()) ?
-                (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                    Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                    std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-                std::make_shared<OperatorImpl>(*this, ""));
-#endif
-    // if (mOutputs[0]->hasImpl()) {
-        // if (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
-        //     setImpl(Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this));
-        // }
-        // else  {
-        //     mImpl = std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend());
-        // }
-
-    // } else {
-    //     mImpl = nullptr;
-    // }
+    if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
+        SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
+    }
+    else {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
 }
 
 void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-#ifdef PYBIND
-    if(Py_IsInitialized()) {
-            auto obj = py::cast(&(*this));
-            setImpl((Registrar<Producer_Op>::exists({name})) ?
-                    Registrar<Producer_Op>::create(name)(*this) :
-                    std::make_shared<OperatorImpl>(*this, ""));
-        } else {
-            setImpl((Registrar<Producer_Op>::exists({name})) ?
-                    Registrar<Producer_Op>::create(name)(*this) :
-                    std::make_shared<OperatorImpl>(*this, ""));
-        }
-#else
-    setImpl((Registrar<Producer_Op>::exists({name})) ?
-        Registrar<Producer_Op>::create(name)(*this) :
-        std::make_shared<OperatorImpl>(*this, ""));
-#endif
+    if (Registrar<Producer_Op>::exists({name})){
+        SET_IMPL_MACRO(Producer_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
+
+void Aidge::Producer_Op::forward() {
+    if (!backend().empty()) {
+        mImpl->forward();
+    }
+
+    runHooks();
+}
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 0de676e22ec668a9b41d7d61f184465d431715a2..28e39b6d3387a0371c0505dc0a7b350e83a2bbaf 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -26,34 +26,35 @@
 
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
-void Aidge::ReduceMean_Op::computeOutputDims() {
-        if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+    }
+    if (!getInput(0)->empty()) {
+        // make Axes attribute positive
+        std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+            if (val < 0)
+                val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+        });
+        std::sort(axes.begin(), axes.end());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+            std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
-        if (!getInput(0)->empty()) {
-            // make Axes attribute positive
-            std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
-            std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
-                if (val < 0)
-                    val+=static_cast<std::int32_t>(getInput(0)->nbDims());
-            });
-            std::sort(axes.begin(), axes.end());
-
-            // build output dimensions
-            std::vector<DimSize_t> outDims = getInput(0)->dims();
-            if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
-                std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
-            }
-            else {
-                for (auto it = axes.crbegin(); it != axes.crend(); ++it)
-                    outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
-            }
-
-            // TODO: change {1} for {} when scalar Tensors are better handled.
-            mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
-
+        else {
+            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
         }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
     }
+    return false;
+}
 
 void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(ReduceMean_Op, *this, name);
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 79cfc0659849248bac791ba5b1db25096824e928..ab53c094dac09879c1bec86509463aab2280ca92 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -23,9 +23,14 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+void Aidge::Reshape_OpImpl::forward() {
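+    // Reshape does not change the underlying memory layout: simply copy the raw input buffer into the output.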
+    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
-void Aidge::Reshape_Op::computeOutputDims() {
+bool Aidge::Reshape_Op::forwardDims(bool /*allowDataDependency*/) {
     // check input has been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
@@ -58,10 +63,18 @@ void Aidge::Reshape_Op::computeOutputDims() {
         }
 
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Reshape_Op, *this, name);
+    if (Registrar<Reshape_Op>::exists({name})){
+        SET_IMPL_MACRO(Reshape_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Reshape_OpImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index 8b0d6f9db698e36d232dec38fd8cdd0fad5f8c59..dc5e272210feb09fd5dac6ba4b16f9ba8dc93bf0 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -21,6 +21,6 @@
 const std::string Aidge::Scaling_Op::Type = "Scaling";
 
 void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Scaling_Op>::create(name)(*this);
+    SET_IMPL_MACRO(Scaling_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
\ No newline at end of file
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 6d2670695b2ffe9acbf09edd3e82f8549a4184f0..97ec0a5171a8f13fee0a93557b6831443f10713a 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -22,9 +22,78 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+void Aidge::Slice_OpImpl::forward() {
+    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
+    const auto inputDims = op.getInput(0)->dims();
+    auto slicedDims = op.getInput(0)->dims();
+
+    std::size_t beginning = 0;
+    DimSize_t nbAxes = op.getAttr<SliceAttr::Axes>().size();
+    for (std::size_t i = 0; i < nbAxes; ++i) {
+        // For each slice operation get the params and cast them to size_t
+        const std::int64_t axis_ = op.getAttr<SliceAttr::Axes>()[i];
+        const std::int64_t start_ = op.getAttr<SliceAttr::Starts>()[i];
+        const std::int64_t end_ = op.getAttr<SliceAttr::Ends>()[i];
+        const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_) + inputDims.size();
+        const std::size_t start = start_ >= 0 ? start_ : start_ + inputDims[axis];
+        const std::size_t end = end_ >= 0 ? end_ : end_ + inputDims[axis];
+        std::size_t stride = 1;
+        for (std::size_t j = inputDims.size() - 1; j > axis; --j) stride *= inputDims[j];
+        beginning += start * stride;
+        const std::size_t sliceLength = end - start + 1;
+        slicedDims[axis] = sliceLength;
+    }
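+    // 'beginning' is now the flattened offset of the first sliced element in the input tensor.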
+
+    const std::size_t nbDims = slicedDims.size();
+
+    // for inputDims = {4,5,5,3} & slicedDims = {3,2,2,1}, subtractedDims = {1,5,5,3}
+    std::vector<std::size_t> subtractedDims = std::vector<std::size_t>(nbDims);
+    for (std::size_t i = 0; i < nbDims; ++i) {
+        subtractedDims[i] = inputDims[i] - slicedDims[i];
+    }
+
+    // for slicedDims = {3,2,2,1}, prodSlicedDims = {12,4,2,1}
+    std::vector<std::size_t> prodSlicedDims = std::vector<std::size_t>(nbDims);
+    std::vector<std::size_t> prodInputDims = std::vector<std::size_t>(nbDims + 1);
+    prodSlicedDims[nbDims - 1] = slicedDims[nbDims - 1];
+    prodInputDims[nbDims - 1] = inputDims[nbDims - 1];
+    prodInputDims[nbDims] = 1;
+    for (std::size_t i = 2; i <= nbDims; ++i) {
+        prodSlicedDims[nbDims - i] = prodSlicedDims[nbDims - i + 1] * slicedDims[nbDims - i];
+        prodInputDims[nbDims - i] = prodInputDims[nbDims - i + 1] * inputDims[nbDims - i];
+    }
+
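+    // Copy the slice as a sequence of contiguous chunks: walk the flattened output, skip the
+    // removed input elements at every dimension boundary, and copy each contiguous run at once.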
+    std::size_t i = beginning;
+    std::size_t size = 0;
+    std::size_t offset = 0;
+    for (std::size_t j = 0; j < prodSlicedDims[0];) {
+        ++size;
+        ++i;
+        ++j;
+        bool newChunk = false;
+        for (std::size_t idx = nbDims - 1; idx > 0; --idx) {
+            if (j % prodSlicedDims[idx] == 0) {
+                i += subtractedDims[idx] * prodInputDims[idx + 1];
+                newChunk = true;
+            }
+        }
+
+        if (newChunk) {
+            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(beginning), size, offset);
+            beginning = i;
+            offset += size;
+            size = 0;
+        }
+    }
+
+    if (size > 0) {
+        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(beginning), size, offset);
+    }
+}
+
 const std::string Aidge::Slice_Op::Type = "Slice";
 
-void Aidge::Slice_Op::computeOutputDims() {
+bool Aidge::Slice_Op::forwardDims(bool /*allowDataDependency*/) {
     // check input have been associated
     if (!getInput(0) || (getInput(0)->empty())) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
@@ -50,4 +119,15 @@ void Aidge::Slice_Op::computeOutputDims() {
         outDims[axis] = sliceLength;
     }
     mOutputs[0]->resize(outDims);
+    return true;
+}
+
+void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Slice_Op>::exists({name})){
+        SET_IMPL_MACRO(Slice_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Slice_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 0c12e6a1fdb7f3b1056e19bf694996d0061b5b04..b977f4ee7ccce32d7f7929cbee99140aea36cd2f 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -24,7 +24,7 @@
 
 const std::string Aidge::Sub_Op::Type = "Sub";
 
-void Aidge::Sub_Op::computeOutputDims() {
+bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -46,13 +46,17 @@ void Aidge::Sub_Op::computeOutputDims() {
                 outDims[out_id] = lowDims[low_id];
             }
             else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsopported Tensor shape for Div Operation");
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Sub Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
             }
             --out_id;
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..08c4770e3fb43fe819a924dd963356401c3ce801
--- /dev/null
+++ b/src/operator/Transpose.cpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Transpose.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::Transpose_OpImpl::forward() {
+    const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
+    const auto inputDims = op.getInput(0)->dims();
+    const auto outputDims = op.getOutput(0)->dims();
+
+    std::vector<std::size_t> outStrides(outputDims.size(), 1);
+    for (size_t i = 0; i < outputDims.size(); ++i) {
+        for (size_t j = i+1; j < outputDims.size(); ++j)
+        {
+            outStrides[i] *= outputDims[j];
+        }
+    }
+
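+    // Walk the input in row-major order while tracking its multi-dimensional index ('indices');
+    // for each element, compute the flattened output offset under the permutation and copy the value.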
+    std::vector<size_t> indices(outputDims.size(), 0);
+    for (size_t i = 0; i < op.getInput(0)->size(); ++i) {
+        size_t idx = 0;
+        // Permute indices based on OutputDimsOrder attr
+        for (int j = outputDims.size() -1; j >=0; --j) {
+            idx += indices[op.getAttr<std::vector<DimSize_t>>(0)[j]] * outStrides[j];
+        }
+        // Copy the value in output
+        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i), 1, idx);
+
+        // Update indices for the next iteration
+        for (int j = outputDims.size() - 1; j >= 0; --j) {
+            if (indices[j] < inputDims[j] - 1) {
+                indices[j]++;
+                break;
+            } else {
+                indices[j] = 0;
+            }
+        }
+    }
+}
+
+const std::string Aidge::Transpose_Op::Type = "Transpose";
+
+bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
+    // check input has been associated
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
+    }
+
+    if (!getInput(0)->empty()) {
+        const auto& outDimsOrder = getAttr<std::vector<DimSize_t>>(0);
+        std::vector<DimSize_t> outputDims;
+        for (std::size_t i = 0; i < outDimsOrder.size(); ++i) {
+            outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
+        }
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+    return false;
+}
+
+void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Transpose_Op>::exists({name})){
+        SET_IMPL_MACRO(Transpose_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Transpose_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
index 6582038e981bb58534d04ded57052f6a0f83e9a9..9a89f8af583fa8567f62ed7c8f42d160d80a1e99 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -85,8 +85,12 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     AIDGE_ASSERT(outSize, "Couldnt get output number of channels for FC operator.");
 
     // Instanciate FC
-    //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
-    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(outSize, bias ? false : true));
+    std::string fcName = matmulNode->name();
+    if (!addNode->name().empty()) {
+        fcName += "_" + addNode->name();
+    }
+
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(outSize, bias ? false : true), fcName);
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index 3b42db7fe18d2269b95cf35fd92851d1e3684bad..b0c99bffb895dc64b20d76991911ae5f4b604c85 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -51,7 +51,7 @@ void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) {
         AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type());
         const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator());
         for (std::size_t o = 0; o < node -> nbOutputs(); ++o) {
-            op->getOutput(o)->initGradient();
+            op->getOutput(o)->initGrad();
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index 8e27fea58014b4ec16729f3593dd656026e16826..7959e1b70acab617b9c6f92160c6d501712f5945 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -41,7 +41,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     if (op->nbOutputs() != 1 || op->nbData() > 1) {
         AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
     }
-    if (!op->outputDimsForwarded()) {
+    if (!op->dimsForwarded()) {
         AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling");
     }
     // start by doing a tiling with strict dimensions division
diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp
index 1dd13fe2100122002d4ed068ada4851b1bfba463..4e515099006b9e0588eafc7e981c5f5e80bbe97d 100644
--- a/src/scheduler/ParallelScheduler.cpp
+++ b/src/scheduler/ParallelScheduler.cpp
@@ -28,7 +28,7 @@
 #include "aidge/operator/Memorize.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 
-void Aidge::ParallelScheduler::forward(bool forwardDims, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
+void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::shared_ptr<Aidge::Tensor>>& data) {
     // Collect all data input of the graph (that are producers)
     if (!data.empty()){
         connectInputs(data);
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 4e3f9978837120bd01a3de2cfe2d22e33f9d7828..af10e3dcd3ead044f8619c40570936f53039d9a2 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -195,7 +195,9 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
             // be put back in the consumers list once the remaining consumers
             // have been exhausted.
             bool isStillConsumer = false;
-            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
+            // Only look at data inputs: if no data is available on the data inputs,
+            // then, by definition, no parameter can be consumed on the parameter inputs.
+            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbData(); ++inputIdx) {
                 AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
 
                 if (consumer->getOperator()->getNbConsumedData(inputIdx) <
@@ -280,7 +282,12 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
     mPriorCache.clear();
 
     if (!consumers.empty()) {
-        Log::warn("Remaining consumers: possible dead-lock");
+        std::vector<std::string> consumersName;
+        std::transform(consumers.begin(), consumers.end(),
+            std::back_inserter(consumersName),
+            [&namePtrTable](auto val){ return namePtrTable.at(val); });
+
+        Log::warn("Remaining consumers: {}. Possible dead-lock.", consumersName);
     }
 
     return schedule;
@@ -491,17 +498,17 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wr
                 const MemoryManager::MemoryPlane& memPlane
                     = (wrapAroundBuffer && wrapAroundSize > 0)
                         ? (*wrapAroundMemPlane[outputIdx]) :
-                            memManager.allocate(requiredSize.data, childs, stride, length, count);
+                            memManager.allocate(size, childs, stride, length, count);
 
                 if (wrapAroundBuffer && wrapAroundSize > 0) {
                     memManager.reallocate(memPlane,
                         node, 0,
-                        requiredSize.data, true, wrapAroundExtra, childs, stride, length, count);
+                        size, true, wrapAroundExtra, childs, stride, length, count);
                 }
                 else {
                     memManager.reallocate(memPlane.memSpace,
                         node, memPlane.offset,
-                        requiredSize.data, false, 0, childs, stride, length, count);
+                        size, false, 0, childs, stride, length, count);
                 }
             }
 
@@ -513,12 +520,23 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wr
     return memManager;
 }
 
-void Aidge::Scheduler::connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data){
+void Aidge::Scheduler::connectInputs(const std::vector<std::shared_ptr<Aidge::Tensor>>& data){
     // This version of connect inputs only connects tensor inputs in input data producers.
     auto inputNodes = mGraphView->getOrderedInputs();
 
     // Assert that the number of input data producers corresponds to the number of data input
-    assert(data.size() == inputNodes.size()  && "Scheduler connectInput error - Inconsistent number of graph inputs and inputs passed to the graph");
+    if (data.size() != inputNodes.size()) {
+        const std::map<std::shared_ptr<Node>, std::string> namePtrTable
+            = mGraphView->getRankedNodesName("{0} ({1}#{3})");
+
+        std::vector<std::pair<std::string, IOIndex_t>> inputNodesName;
+        std::transform(inputNodes.begin(), inputNodes.end(),
+            std::back_inserter(inputNodesName),
+            [&namePtrTable](auto val){ return std::make_pair(namePtrTable.at(val.first), val.second); });
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Provided {} inputs to the scheduler, but graph has {} inputs (required inputs in order: {})",
+            data.size(), inputNodes.size(), inputNodesName);
+    }
 
     for (std::size_t i = 0; i < data.size(); ++i){
         // TODO : maybe shallow copy instead of deepcopy
diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp
index 801f46ffb0293696dad8a84908bdda2bbd789bfc..74b1b3f0c6e9be164792460669821744661c15b3 100644
--- a/src/scheduler/SequentialScheduler.cpp
+++ b/src/scheduler/SequentialScheduler.cpp
@@ -28,7 +28,7 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 
-void Aidge::SequentialScheduler::forward(bool forwardDims, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
+void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std::shared_ptr<Aidge::Tensor>>& data) {
     // Collect all data input of the graph (that are producers)
     if (!data.empty()){
         connectInputs(data);
@@ -73,21 +73,12 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, std::vector<std::shar
     }
 }
 
-void Aidge::SequentialScheduler::backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instanciateGrad) {
+void Aidge::SequentialScheduler::backward(bool instanciateGrad) {
     // create ad set Grad values
     if (instanciateGrad) { compile_gradient(mGraphView); }
 
-    const auto& ordered_outputs = mGraphView->getOrderedOutputs();
-    AIDGE_ASSERT(ordered_outputs.size() == data.size(), "You must provide the \
-                   right number of data objects to run the backward function. \
-                   {} outputs detected for the current GraphView when {} were \
-                   provided.", ordered_outputs.size(), data.size());
-    for (std::size_t i = 0; i < ordered_outputs.size(); ++i) {
-        const std::shared_ptr<OperatorTensor> op_ = std::dynamic_pointer_cast<OperatorTensor>(ordered_outputs[i].first->getOperator());
-        const std::shared_ptr<Tensor> t_grad = op_->getOutput(ordered_outputs[i].second)->grad();
-        AIDGE_ASSERT(data[i]->dims() == t_grad->dims(), "Wrong gradient size.");
-        *t_grad = data[i]->clone();
-    }
+    // TODO: check that output gradients are not empty
+
     // Generate scheduling *only if empty*
     // If scheduling was already generated (in one or several steps, i.e. one or
     // several successive call to generateScheduling()), do not generate it twice
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index 03ecded8f5a193a8ab00cf9dc7be502b98205de2..54af888caca8dc2c4b512515ff70663f9437dd45 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -56,7 +56,7 @@ void Aidge::Log::log(Level level, const std::string& msg) {
         // the log file.
         const auto modifier
             = (level == Debug) ? fmt::fg(fmt::color::gray)
-            : (level == Notice) ? fmt::fg(fmt::color::light_yellow)
+            : (level == Notice) ? fmt::fg(fmt::color::medium_purple)
             : (level == Warn) ? fmt::fg(fmt::color::orange)
             : (level == Error) ? fmt::fg(fmt::color::red)
             : (level == Fatal) ? fmt::bg(fmt::color::red)
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 437780b959b37e0cf6b5b7796e71c9b931f25bc0..8403686d16da15e7e8ad4616029a241d6197d450 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -648,11 +648,8 @@ TEST_CASE("[GraphView] clone") {
     auto conv1 = Conv(3, 32, {3, 3}, "conv1");
     auto conv2 = Conv(32, 64, {3, 3}, "conv2");
     auto conv3 = Conv(64, 10, {1, 1}, "conv3");
-    auto g1 = std::make_shared<GraphView>("TestGraph");
+    auto g1 = Sequential({conv1, conv2, conv3});
     dataProvider->addChild(conv1, 0);
-    g1->add(conv1);
-    g1->addChild(conv2, conv1, 0);
-    g1->addChild(conv3, conv2, 0);
     g1->save("clone_g1");
 
     SECTION("Check input-output connections") {
diff --git a/unit_tests/operator/Test_ConcatImpl.cpp b/unit_tests/operator/Test_ConcatImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..184c02d5208c99b903cf838784bb14fb65799111
--- /dev/null
+++ b/unit_tests/operator/Test_ConcatImpl.cpp
@@ -0,0 +1,143 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Concat.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
+    SECTION("Concat 1D inputs") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
+        std::shared_ptr<Tensor> input3 = std::make_shared<Tensor>(Array1D<int,4>{{ 7, 8, 9, 10 }});
+        std::shared_ptr<Tensor> input4 = std::make_shared<Tensor>(Array1D<int,5>{{ 11, 12, 13, 14, 15 }});
+        std::shared_ptr<Tensor> input5 = std::make_shared<Tensor>(Array1D<int,6>{{ 16, 17, 18, 19, 20, 21 }});
+
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,20>{
+            { 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21 }});
+
+        auto myConcat = Concat(5, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        myConcat->getOperator()->associateInput(2, input3);
+        myConcat->getOperator()->associateInput(3, input4);
+        myConcat->getOperator()->associateInput(4, input5);
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        myConcat->forward();
+
+        std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+    }
+    SECTION("Concat 4D inputs on 1st axis") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+            {                                       //
+                {                                   //
+                    {{20, 47},{21, 48},{22, 49}},   //
+                    {{23, 50},{24, 51},{25, 52}},   //
+                    {{26, 53},{27, 54},{28, 55}}    //
+                },                                  //
+            }                                       //
+        });                                         //
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array4D<int,2,3,3,2> {
+            {
+                {                                   //
+                    {{29, 56},{30, 57},{31, 58}},   //
+                    {{32, 59},{33, 60},{34, 61}},   //
+                    {{35, 62},{36, 63},{37, 64}}    //
+                },                                  //
+                {                                   //
+                    {{38, 65},{39, 66},{40, 67}},   //
+                    {{41, 68},{42, 69},{43, 70}},   //
+                    {{44, 71},{45, 72},{46, 73}}    //
+                }                                   //
+            }                                       //
+        });                                         //
+
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
+            {                                       //
+                {                                   //
+                    {{20, 47},{21, 48},{22, 49}},   //
+                    {{23, 50},{24, 51},{25, 52}},   //
+                    {{26, 53},{27, 54},{28, 55}}    //
+                },                                  //
+                {                                   //
+                    {{29, 56},{30, 57},{31, 58}},   //
+                    {{32, 59},{33, 60},{34, 61}},   //
+                    {{35, 62},{36, 63},{37, 64}}    //
+                },                                  //
+                {                                   //
+                    {{38, 65},{39, 66},{40, 67}},   //
+                    {{41, 68},{42, 69},{43, 70}},   //
+                    {{44, 71},{45, 72},{46, 73}}    //
+                }                                   //
+            }                                       //
+        });                                         //
+
+        auto myConcat = Concat(2, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        myConcat->forward();
+
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0)->print();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+    }
+
+    SECTION("Concat 4D inputs on 3rd axis") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+            {                                       //
+                {                                   //
+                    {{20, 47},{21, 48},{22, 49}},   //
+                    {{23, 50},{24, 51},{25, 52}},   //
+                    {{26, 53},{27, 54},{28, 55}}    //
+                },                                  //
+            }                                       //
+        });                                         //
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array4D<int,1,3,6,2> {
+            {
+                {                                   //
+                    {{29, 56},{30, 57},{31, 58},{38, 65},{39, 66},{40, 67}},   //
+                    {{32, 59},{33, 60},{34, 61},{41, 68},{42, 69},{43, 70}},   //
+                    {{35, 62},{36, 63},{37, 64},{44, 71},{45, 72},{46, 73}}    //
+                },
+            }
+        });
+
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,3,9,2> {
+            {                                                                                             //
+                {                                                                                         //
+                    {{20, 47},{21, 48},{22, 49},{29, 56},{30, 57},{31, 58},{38, 65},{39, 66},{40, 67}},   //
+                    {{23, 50},{24, 51},{25, 52},{32, 59},{33, 60},{34, 61},{41, 68},{42, 69},{43, 70}},   //
+                    {{26, 53},{27, 54},{28, 55},{35, 62},{36, 63},{37, 64},{44, 71},{45, 72},{46, 73}}    //
+                },                                                                                        //
+            }                                                                                             //
+        });                                                                                               //
+
+        auto myConcat = Concat(2, 2);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        myConcat->forward();
+
+        std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp
index e659742c0bd200fa33b598f581cfef7b2f1e432e..d11f72474b0b70bf335dfee95d13a9b41cfe6efb 100644
--- a/unit_tests/operator/Test_Div_Op.cpp
+++ b/unit_tests/operator/Test_Div_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]") {
+TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
@@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
 
     /**
      * @todo Special case: scalar not handled yet by
-     * ``OperatorTensor::computeOutputDims()``
+     * ``OperatorTensor::forwardDims()``
      */
     // SECTION("Scalar / Scalar") {
     //     // input_0
@@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
     //     // input_1
     //     T1->resize({});
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
     // }
     // SECTION("Scalar / +1-D") {
@@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
     //         }
     //         T1->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
     //         }
     //         T0->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
 
             T0->resize(dims0);
             T1->resize(dims0);
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dims0);
         }
 
@@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
             T0->resize(dims0);
             T1->resize(dims1);
 
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dimsOut);
 
             // input_0 - wrong
@@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
             }
             T1->resize(dims1_wrong);
             REQUIRE(dims0 != dims1_wrong);
-            REQUIRE_THROWS(op->computeOutputDims());
+            REQUIRE_THROWS(op->forwardDims());
         }
     }
 }
diff --git a/unit_tests/operator/Test_GatherImpl.cpp b/unit_tests/operator/Test_GatherImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2995963a35cda5b0c5794b1d15e4064438b58ece
--- /dev/null
+++ b/unit_tests/operator/Test_GatherImpl.cpp
@@ -0,0 +1,96 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Gather.hpp"
+
+#include <memory>
+
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Gather(forward)") {
+    SECTION("2D Tensor axis 0") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
+            {
+                {1, 2, 3},
+                {4, 5, 6},
+                {7, 8, 9}
+            }
+        });
+        std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> {
+            {
+                {1, 2}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,2,3> {
+            {
+                {
+                    {4, 5, 6},
+                    {7, 8, 9}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myGather = Gather({1, 2}, {1, 2}, 0);
+        auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
+        op->associateInput(0,input);
+        // op->associateInput(1,indexes);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myGather->forward();
+        op->getOutput(0)->print();
+        expectedOutput->print();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+    SECTION("2D Tensor axis 1") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
+            {
+                {1, 2, 3},
+                {4, 5, 6},
+                {7, 8, 9}
+            }
+        });
+        std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> {
+            {
+                {0, 2}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,3,1,2> {
+            {
+                {
+                    {1, 3}
+                },
+                {
+                    {4, 6}
+                },
+                {
+                    {7, 9}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myGather = Gather({0, 2}, {1, 2}, 1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
+        op->associateInput(0,input);
+        // op->associateInput(1,indexes);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myGather->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
index fcd8489144be121633f2b0a9601dee171e2bdb5e..d20f689aba55d8cbaef553388d4666fd6c1d7172 100644
--- a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
+++ b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
@@ -21,8 +21,8 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
-          "[GlobalAveragePooling][computeOutputDims]") {
+TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)",
+          "[GlobalAveragePooling][forwardDims]") {
   constexpr std::uint16_t NB_TRIALS = 10;
   // Create a random number generator
   std::random_device rd;
@@ -39,7 +39,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
   // input_0
   std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
   SECTION("Un-connected input leads to failure.") {
-    REQUIRE_THROWS(op->computeOutputDims());
+    REQUIRE_THROWS(op->forwardDims());
   }
   op->associateInput(0, input_T);
 
@@ -49,7 +49,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
         const std::size_t nb_dims = 0;
         std::vector<std::size_t> dims(nb_dims);
         input_T->resize(dims);
-        REQUIRE_NOTHROW(op->computeOutputDims());
+        REQUIRE_NOTHROW(op->forwardDims());
       }
     }
     SECTION("Full tensor") {
@@ -61,7 +61,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
             dims[i] = dimsDist(gen);
           }
           input_T->resize(dims);
-          REQUIRE_THROWS(op->computeOutputDims());
+          REQUIRE_THROWS(op->forwardDims());
         }
       }
       SECTION("nbDim > 3") {
@@ -74,7 +74,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
           std::vector<DimSize_t> dims_out{dims[0], dims[1]};
           input_T->resize(dims);
           op->setInput(0, input_T);
-          REQUIRE_NOTHROW(op->computeOutputDims());
+          REQUIRE_NOTHROW(op->forwardDims());
           REQUIRE(op->getOutput(0)->dims() == dims_out);
           REQUIRE((op->getOutput(0)->dims().size()) == static_cast<size_t>(2));
         }
diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp
index 6c810e675ad46cc5580bd24e57f7e7dbb84db38f..bdd1de87c27351e943c59fa616c40dc4a0001abc 100644
--- a/unit_tests/operator/Test_MatMul_Op.cpp
+++ b/unit_tests/operator/Test_MatMul_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutputDims]") {
+TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     // Create a random number generator
     std::random_device rd;
     std::mt19937 gen(rd());
@@ -43,13 +43,13 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
     //     T1->resize({});
     //     op -> associateInput(1,T1);
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims()).empty());
 
     //     // input_1 - wrong
     //     T1->resize({dist(gen)});
 
-    //     REQUIRE_THROWS(op->computeOutputDims());
+    //     REQUIRE_THROWS(op->forwardDims());
     // }
 
     SECTION("1-D / N-D") {
@@ -66,26 +66,26 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
             // input_1 - right
             T1->resize({dim0});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE((op->getOutput(0)->dims()).empty());
 
             // input_1 - wrong
             T1->resize({dim0+1});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
         SECTION("1-D / 2-D") {
             // input_1 - right
             const std::size_t dim1 = dist(gen);
             T1->resize({dim0,dim1});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1}));
 
             // input_1 - wrong
             T1->resize({dim0+1,dim1});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
         SECTION("1-D / +2-D") {
             // input_1 - right
@@ -94,7 +94,7 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
             const std::size_t dim3 = dist(gen);
             T1->resize({dim1,dim2,dim0,dim3});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1,dim2,dim3}));
         }
     }
@@ -114,26 +114,26 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
             // input_1 - right
             T1->resize({dim1});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0}));
 
             // input_1 - wrong
             T1->resize({dim1+1});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
         SECTION("2-D / 2-D") {
             // input_1 - right
             const std::size_t dim2 = dist(gen);
             T1->resize({dim1, dim2});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim2}));
 
             // input_1 - wrong
             T1->resize({dim1+1,dim2});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
         SECTION("2-D / +2-D") {
             // input_1 - right
@@ -142,13 +142,13 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
             const std::size_t dim4 = dist(gen);
             T1->resize({dim3,dim4,dim1, dim2});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim3,dim4,dim0,dim2}));
 
             // input_1 - wrong
             T1->resize({dim3,dim4,dim1+1,dim2});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
     }
     SECTION("+2-D / +2-D") {
@@ -169,28 +169,28 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
         // 1
         const std::size_t dim5 = dist(gen);
         T1->resize({dim0,dim1,dim3,dim5});
-        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE_NOTHROW(op -> forwardDims());
         REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
 
         // 2 - input_1 broadcast
         T1->resize({1,dim1,dim3,dim5});
-        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE_NOTHROW(op -> forwardDims());
         REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
 
         // 3 - input_0 broadcast
         const std::size_t dim1_bigger = dist(gen) + 1;
         T1->resize({dim0,dim1_bigger,dim3,dim5});
-        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE_NOTHROW(op -> forwardDims());
         REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
 
         // 4 - input_0+input_1 broadcast
         T1->resize({1,dim1_bigger,dim3,dim5});
-        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE_NOTHROW(op -> forwardDims());
         REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
 
         // input_1 - wrong
         T1->resize({dim0+1,dim1,dim3,dim5});
-        REQUIRE_THROWS(op -> computeOutputDims());
+        REQUIRE_THROWS(op -> forwardDims());
     }
 }
 } // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index cd42791e0db1d95469bdd414cab94f1c6e8fea17..ed4afafe39a367ecabb25ff949eb3d03999d1ea9 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -9,6 +9,12 @@
  *
  ********************************************************************************/
 
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <string>
+#include <utility>  // std::pair
+#include <vector>
+
 #include <catch2/catch_test_macros.hpp>
 
 #include "aidge/operator/Pop.hpp"
@@ -17,7 +23,6 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Testing.hpp"
 #include "aidge/recipes/Recipes.hpp"
-#include <cstddef>
 
 using namespace Aidge;
 
@@ -37,13 +42,12 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(op->nbData() == 1);
         REQUIRE(op->nbOutputs() == 1);
 
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
-        myInput->resize({2,3,5,5});
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(std::vector<std::size_t>({2,1,5,5}));
         std::shared_ptr<OperatorTensor> opTensor = std::static_pointer_cast<OperatorTensor>(op->getOperator());
         opTensor->associateInput(0,myInput);
-        opTensor->computeOutputDims();
+        opTensor->forwardDims();
 
-        REQUIRE(opTensor->outputDimsForwarded());
+        REQUIRE(opTensor->dimsForwarded());
         REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawOutput(0))->dims() == std::vector<size_t>({2,3,5,5}));
         REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawInput(0)) == myInput);
         REQUIRE(microGraph->getOrderedInputs()[0].first->getOperator()->getRawInput(0) == myInput);
@@ -74,9 +78,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         op->associateInput(17, myInit);
         op->associateInput(18, myInit);
 
-        op->computeOutputDims();
+        op->forwardDims();
         microGraph->save("lstm_dims", true, true);
-        REQUIRE(op->outputDimsForwarded());
+        REQUIRE(op->dimsForwarded());
 
         //op->updateConsummerProducer();  // require implementation
         //auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraphScheduler();
diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp
index d3e0c5e086fac9d31db817d628214e95d4e41a32..f3f8fb9522943d0a9574cb80cfc228135a973890 100644
--- a/unit_tests/operator/Test_Mul_Op.cpp
+++ b/unit_tests/operator/Test_Mul_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") {
+TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
@@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
 
     /**
      * @todo Special case: scalar not handled yet by
-     * ``OperatorTensor::computeOutputDims()``
+     * ``OperatorTensor::forwardDims()``
      */
     // SECTION("Scalar / Scalar") {
     //     // input_0
@@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
     //     // input_1
     //     T1->resize({});
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
     // }
     // SECTION("Scalar / +1-D") {
@@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
     //         }
     //         T1->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
     //         }
     //         T0->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
 
             T0->resize(dims0);
             T1->resize(dims0);
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dims0);
         }
 
@@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
             T0->resize(dims0);
             T1->resize(dims1);
 
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dimsOut);
 
             // input_0 - wrong
@@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
             }
             T1->resize(dims1_wrong);
             REQUIRE(dims0 != dims1_wrong);
-            REQUIRE_THROWS(op->computeOutputDims());
+            REQUIRE_THROWS(op->forwardDims());
         }
     }
 }
diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp
index c77615c11e99c174707df21560044fdd3b6a3c42..4a8d242a355cda58c7b36914efdb1304220f713a 100644
--- a/unit_tests/operator/Test_Pow_Op.cpp
+++ b/unit_tests/operator/Test_Pow_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") {
+TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
@@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
 
     /**
      * @todo Special case: scalar not handled yet by
-     * ``OperatorTensor::computeOutputDims()``
+     * ``OperatorTensor::forwardDims()``
      */
     // SECTION("Scalar / Scalar") {
     //     // input_0
@@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
     //     // input_1
     //     T1->resize({});
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
     // }
     // SECTION("Scalar / +1-D") {
@@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
     //         }
     //         T1->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
     //         }
     //         T0->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
 
             T0->resize(dims0);
             T1->resize(dims0);
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dims0);
         }
 
@@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
             T0->resize(dims0);
             T1->resize(dims1);
 
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dimsOut);
 
             // input_0 - wrong
@@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
             }
             T1->resize(dims1_wrong);
             REQUIRE(dims0 != dims1_wrong);
-            REQUIRE_THROWS(op->computeOutputDims());
+            REQUIRE_THROWS(op->forwardDims());
         }
     }
 }
diff --git a/unit_tests/operator/Test_ReshapeImpl.cpp b/unit_tests/operator/Test_ReshapeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5d28005eb40534742aae495948e5269373b81ad1
--- /dev/null
+++ b/unit_tests/operator/Test_ReshapeImpl.cpp
@@ -0,0 +1,67 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Reshape.hpp"
+
+#include <memory>
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Reshape(forward)") {
+    SECTION("1D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
+            {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
+            {
+                {1.0, 2.0, 3.0},
+                {4.0, 5.0, 6.0}
+            }
+        });
+
+        std::shared_ptr<Node> myReshape = Reshape({2, 3});
+        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        myReshape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+    }
+    SECTION("2D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> {
+            {
+                {1.0, 2.0, 3.0},
+                {4.0, 5.0, 6.0}
+            }
+
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
+            {
+                {1.0, 2.0},
+                {3.0, 4.0},
+                {5.0, 6.0}
+            }
+        });
+
+        std::shared_ptr<Node> myReshape = Reshape({3, 2});
+        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        myReshape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..91ae92848b552a6038a4cb5f8dd3848b20ac2168
--- /dev/null
+++ b/unit_tests/operator/Test_SliceImpl.cpp
@@ -0,0 +1,160 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Slice.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
+    SECTION("1D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
+            {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,4> {
+            {0, 1, 2,-3}
+        });
+
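+        // Slice is called here as Slice(starts, ends, axes). With start 0 and
+        // end 3 on axis 0, the expected output holds four values, which suggests
+        // the end index is treated as inclusive in these tests.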
+        std::shared_ptr<Node> mySlice = Slice({0}, {3}, {0});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice->getOperator());
+        op->associateInput(0, input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        mySlice->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("2D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
+            {
+                { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,3> {
+            {
+                {-5,-6, 7},
+                {-5,-6, 7}
+            }
+        });
+
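+        // Keep rows 0..1 and columns 5..7 (ends inclusive, as above): a 2x3
+        // block containing {-5, -6, 7} from each row.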
+        std::shared_ptr<Node> mySlice = Slice({0,5}, {1,7}, {0,1});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice->getOperator());
+        op->associateInput(0, input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        mySlice->forward();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("3D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
+            {
+                {
+                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                },
+                {
+                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,1,3> {
+            {
+                {
+                    { 4,-5,-6}
+                }
+            }
+        });
+
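+        // A single index is kept on axes 0 and 1, and indices 4..6 on the last
+        // axis, so the expected output shrinks to 1x1x3.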
+        std::shared_ptr<Node> mySlice = Slice({0,1,4}, {0,1,6}, {0,1,2});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice->getOperator());
+        op->associateInput(0, input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        mySlice->forward();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+
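+        // The start/end pairs cover the full extent of every axis, so the
+        // expected output is identical to the input tensor.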
+        std::shared_ptr<Node> mySlice = Slice({0,0,0,0}, {1,1,1,9}, {0,1,2,3});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice->getOperator());
+        op->associateInput(0, input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        mySlice->forward();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+}
diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp
index b7b744410d31ea32dea5a15cc7a29da093488d14..329f3da798854ddff3d1c1393d60c57ef180c70a 100644
--- a/unit_tests/operator/Test_Sub_Op.cpp
+++ b/unit_tests/operator/Test_Sub_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") {
+TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
@@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
 
     /**
      * @todo Special case: scalar not handled yet by
-     * ``OperatorTensor::computeOutputDims()``
+     * ``OperatorTensor::forwardDims()``
      */
     // SECTION("Scalar / Scalar") {
     //     // input_0
@@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
     //     // input_1
     //     T1->resize({});
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
     // }
     // SECTION("Scalar / +1-D") {
@@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
     //         }
     //         T1->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
     //         }
     //         T0->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
 
             T0->resize(dims0);
             T1->resize(dims0);
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dims0);
         }
 
@@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
             T0->resize(dims0);
             T1->resize(dims1);
 
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dimsOut);
 
             // input_0 - wrong
@@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
             }
             T1->resize(dims1_wrong);
             REQUIRE(dims0 != dims1_wrong);
-            REQUIRE_THROWS(op->computeOutputDims());
+            REQUIRE_THROWS(op->forwardDims());
         }
     }
 }
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8b6eafc70b7eefec6e1ccab9d0cfcde1eb4a09d5
--- /dev/null
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -0,0 +1,123 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Transpose(forward)") {
+    SECTION("3D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
+            {
+                {{0.42507452, 0.11244237, 0.43243718, 0.62354952},
+                {0.90250170, 0.48719984, 0.45781207, 0.92536664},
+                {0.06348717, 0.91678733, 0.64452291, 0.00484818}},
+
+                {{0.66873497, 0.99508536, 0.55714869, 0.84887981},
+                {0.41666120, 0.92365038, 0.80034822, 0.38721532},
+                {0.52037925, 0.53937608, 0.66380072, 0.36330253}}
+            }
+        });
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array3D<float,2,4,3> {
+            {
+                {{0.42507452, 0.90250170, 0.06348717},
+                {0.11244237, 0.48719984, 0.91678733},
+                {0.43243718, 0.45781207, 0.64452291},
+                {0.62354952, 0.92536664, 0.00484818}},
+
+                {{0.66873497, 0.41666120, 0.52037925},
+                {0.99508536, 0.92365038, 0.53937608},
+                {0.55714869, 0.80034822, 0.66380072},
+                {0.84887981, 0.38721532, 0.36330253}}
+            }
+        });
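+        // The permutation {0, 2, 1} swaps the last two axes: the (2, 3, 4) input
+        // becomes (2, 4, 3), and output[i][j][k] matches input[i][k][j] above.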
+        std::shared_ptr<Node> myTranspose = Transpose({0,2,1});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose->getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        myTranspose->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *output);
+    }
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> {
+            {
+                {
+                    {
+                        {1, 2, 3, 4}
+                    },
+                    {
+                        {5, 6, 7, 8}
+                    },
+                    {
+                        {9, 10, 11, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 14, 15, 16}
+                    },
+                    {
+                        {17, 18, 19, 20}
+                    },
+                    {
+                        {21, 22, 23, 24}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,2,4,1,3> {
+            {
+                {
+                    {
+                        {1, 5, 9}
+                    },
+                    {
+                        {2, 6, 10}
+                    },
+                    {
+                        {3, 7, 11}
+                    },
+                    {
+                        {4, 8, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 17, 21}
+                    },
+                    {
+                        {14, 18, 22}
+                    },
+                    {
+                        {15, 19, 23}
+                    },
+                    {
+                        {16, 20, 24}
+                    }
+                }
+            }
+        });
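+        // The permutation {0, 3, 2, 1} exchanges axes 1 and 3: the (2, 3, 1, 4)
+        // input becomes (2, 4, 1, 3), as reflected in the expected values.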
+        std::shared_ptr<Node> myTranspose = Transpose({0,3,2,1});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose->getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myTranspose->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *output);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index e2c1a8fcb96256fa8c3f26a3495913bd987de2d4..ceaa5e301c820ef54970a0e76004ad3467ae66da 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -54,7 +54,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") {
       if (unicity1) {
         for (auto &node : g1->getNodes()) {
           std::static_pointer_cast<GenericOperator_Op>(node->getOperator())
-              ->setComputeOutputDims(
+              ->setForwardDims(
                   GenericOperator_Op::InputIdentity(0, node->nbOutputs()));
         }
 
@@ -97,7 +97,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") {
     //   if (unicity1) {
     //     for (auto &node : g1->getNodes()) {
     //       std::static_pointer_cast<GenericOperator_Op>(node->getOperator())
-    //           ->setComputeOutputDims(
+    //           ->setForwardDims(
     //               GenericOperator_Op::InputIdentity(0, node->nbOutputs()));
     //     }