diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 4aacfafd7d51830dc89b7b30ea5ebf521a13fe30..6e0c1f9b9a0828e266ef3bf19ee75df3e275b282 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -39,7 +39,7 @@ class test_OperatorImpl(unittest.TestCase):
         global GLOBAL_CPT
         matmul = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0")
         generic_matmul_op = matmul.get_operator()
-        generic_matmul_op.set_compute_output_dims(lambda x: x)
+        generic_matmul_op.set_forward_dims(lambda x: x)
         generic_matmul_op.set_impl(testImpl(generic_matmul_op))
         generic_matmul_op.forward()
         self.assertEqual(GLOBAL_CPT, 1)
@@ -52,6 +52,7 @@ class test_OperatorImpl(unittest.TestCase):
         self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         conv.get_operator().set_backend("cpu")
+        conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
         conv.get_operator().forward()
         self.assertEqual(GLOBAL_CPT, 1)
 
@@ -65,6 +66,7 @@ class test_OperatorImpl(unittest.TestCase):
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         model = aidge_core.sequential([conv])
         model.set_backend("cpu")
+        conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
         conv.get_operator().forward()
         self.assertEqual(GLOBAL_CPT, 1)
 
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index c94960733b24444218b1209463adbda11b89f6e8..164aee726255e0478b629ee853d9a1f619945f3a 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -92,14 +92,14 @@ class test_operator_binding(unittest.TestCase):
         attrs.set_attr("d", 23.89)
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
-    def test_compute_output_dims(self):
+    def test_forward_dims(self):
         in_dims=[25, 25]
         input = aidge_core.Producer(in_dims, name="In")
         genOp = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp")
         _ = aidge_core.sequential([input, genOp])
         self.assertListEqual(genOp.get_operator().get_output(0).dims(), [])
-        genOp.get_operator().set_compute_output_dims(lambda x:x)
-        genOp.get_operator().compute_output_dims()
+        genOp.get_operator().set_forward_dims(lambda x:x)
+        genOp.get_operator().forward_dims()
         self.assertListEqual(genOp.get_operator().get_output(0).dims(), in_dims)
 
     def test_set_impl(self):
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 6a9056723df133fef62e56f969d39d8f69390a76..1fc9168da120ba87c916b1a6a346997be69184b4 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -23,7 +23,7 @@ class Operator;
 
 class OperatorImpl {
 public:
-    OperatorImpl(const Operator& op, const std::string& backend);
+    OperatorImpl(const Operator& op, const std::string& backend = "");
     virtual void forward();
     virtual void backward();
 
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 922acacb070c745b2924d1fb787602326ec9d05a..7cd8c67262221fbf9c1b2415ebf98db56274cce5 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -23,6 +23,8 @@ namespace Aidge {
 
 template <class T>
 class TensorImpl_cpu : public TensorImpl {
+    static_assert(std::is_trivially_copyable<T>::value, "TensorImpl type should be trivially copyable");
+
 private:
     /// Pointer to the data and its capacity
     future_std::span<T> mData;
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index b8623450a9c793e4efaff00d87455ab88aa60207..ead6c19fa5fe1e91ec1c24cf8dfee6146390477f 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -251,7 +251,6 @@ class Tensor : public Data,
         auto add_ = Add_Op(2);
         add_.associateInput(0, std::make_shared<Tensor>(*this));
         add_.associateInput(1, std::make_shared<Tensor>(other));
-        add_.computeOutputDims();
         add_.setDataType(dataType());
         add_.setBackend(mImpl->backend());
         add_.forward();
@@ -275,7 +274,6 @@ class Tensor : public Data,
         auto sub_ = Sub_Op();
         sub_.associateInput(0, std::make_shared<Tensor>(*this));
         sub_.associateInput(1, std::make_shared<Tensor>(other));
-        sub_.computeOutputDims();
         sub_.setDataType(dataType());
         sub_.setBackend(mImpl->backend());
         sub_.forward();
@@ -299,7 +297,6 @@ class Tensor : public Data,
         auto mul_ = Mul_Op();
         mul_.associateInput(0, std::make_shared<Tensor>(*this));
         mul_.associateInput(1, std::make_shared<Tensor>(other));
-        mul_.computeOutputDims();
         mul_.setDataType(dataType());
         mul_.setBackend(mImpl->backend());
         mul_.forward();
@@ -323,7 +320,6 @@ class Tensor : public Data,
         auto div_ = Div_Op();
         div_.associateInput(0, std::make_shared<Tensor>(*this));
         div_.associateInput(1, std::make_shared<Tensor>(other));
-        div_.computeOutputDims();
         div_.setDataType(dataType());
         div_.setBackend(mImpl->backend());
         div_.forward();
@@ -529,6 +525,7 @@ public:
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
         AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
+        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
         AIDGE_ASSERT(idx < mSize, "idx out of range");
         return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
     }
@@ -541,6 +538,7 @@ public:
     template <typename expectedType>
     void set(std::size_t idx, expectedType value){
         AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
+        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "set() can only be used for backends providing a valid host pointer");
         AIDGE_ASSERT(idx < mSize, "idx out of range");
         expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(mImplOffset + idx));
         *dataPtr = value;
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 845599fd32f9d2557784241d3d39747768638efa..59c538ce640f9fb8a45c26a29b0c2599d883553e 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -210,7 +210,7 @@ public:
      * @brief Compute dimensions of input/output Tensors for each Operator of the
      * GraphView object's Nodes.
      */
-    void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
+    bool forwardDims(const std::vector<std::vector<DimSize_t>> dims = {}, bool allowDataDependency = false);
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 93cfb44514e39a489ccb75d86fd6e114da5c6162..4ac14bdaecd16e90586d14699f3b6f1bd6d88cab 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -60,7 +60,7 @@ public:
     // }
 
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 4a8ca19a58427f207f9a4cae0dc9d0c29b54d7e7..af2993d67f16df498f13a0489a3837a8f9fc4a75 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -65,7 +65,7 @@ public:
     }
 
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
 
     std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 64ae368f377d264378036e62175dc10b17aff0f4..aa53f8c43f0be2a0e094946d66fd263bc19e39f5 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -68,7 +68,7 @@ public:
     // }
 
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index bbc776a1175a1fc29d08c3872649a6b7aac2f04f..6efbc0a214dde3ca969226f734b5ee903fe5ab50 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -24,13 +24,20 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Cast_OpImpl : public OperatorImpl {
+public:
+    Cast_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 class Cast_Op : public OperatorTensor,
     public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
 public:
     static const std::string Type;
 
-    Cast_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Cast_Op() : OperatorTensor(Type, 1, 0, 1) {
+        mImpl = std::make_shared<Cast_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -39,10 +46,11 @@ public:
     Cast_Op(const Cast_Op& op)
         : OperatorTensor(op)
     {
-        if (op.mImpl) {
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Cast_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Cast_OpImpl>(*this);
         }
     }
 
@@ -56,8 +64,6 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
-    void forward() override;
-
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 611ff6bd53b1f16f87f73dd951d0645b9765262e..a9a4c9253f3af9f9cd82390256ec70d066017cc5 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -26,6 +26,12 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Concat_OpImpl : public OperatorImpl {
+public:
+    Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
@@ -45,6 +51,7 @@ public:
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
         }
+        mImpl = std::make_shared<Concat_OpImpl>(*this);
     }
 
     /**
@@ -55,10 +62,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Concat_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Concat_OpImpl>(*this);
         }
     }
 
@@ -70,7 +78,7 @@ public:
         return std::make_shared<Concat_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c93a098106be76f30c1150ea64c464492429feb9..d6a0df5ab472c4a728e5b5042258d6d2bd34f871 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -108,7 +108,7 @@ public:
 
     // }
 
-    void computeOutputDims() override final {
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
         // check inputs have been associated
         bool associated = true;
         for (IOIndex_t i = 0; i < 3; ++i) {
@@ -118,6 +118,17 @@ public:
             associated &= !(getInput(i)->empty());
         }
         if (associated) {
+            AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                     (getInput(0)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()),
+                     "Wrong input size for Conv operator.");
+            AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)) &&
+                        (getInput(1)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()) &&
+                        (getInput(1)->template dims<DIM+2>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
+                        "Wrong weight size for Conv operator.");
+            if(!this->template getAttr<ConvAttr::NoBias>())
+                AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                        (getInput(2)->template dims<1>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
+                        "Wrong bias size for Conv operator.");
             std::array<DimSize_t, DIM + 2> outputDims{};
             const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
@@ -135,6 +146,8 @@ public:
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
         }
+
+        return associated;
     }
 
     std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
@@ -147,7 +160,7 @@ public:
         if (firstEltDims.size() != outputDims.size()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
         }
-        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
             // Offset
             auto inputIdxDims = firstEltDims; // batch idx is the same
             inputIdxDims[1] = 0; // each channel is used so start with the first one
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 559c0fc7a97a3a882f6720a91d02dee1af70abd8..2337ff66f00b932a190d5b1735d53df3da8ffdbf 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -90,7 +90,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
         // check inputs have been associated
         // TODO : add a check of inputs dimensions ?
         bool associated = true;
@@ -124,6 +124,8 @@ public:
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
         }
+
+        return associated;
     }
 
     std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
@@ -133,7 +135,7 @@ public:
         if (firstEltDims.size() != outputDims.size()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
         }
-        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
             // Offset
             auto inputIdxDims = firstEltDims; // batch idx is the same
 
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 49410db044518dc3ca2cc33285d570197d83b10a..566f4a6ae69b090b3a035b034406d463eeb77317 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -54,7 +54,7 @@ public:
         return std::make_shared<Div_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 222f0ec1235a946865d1b06948bf8b72c5be5a48..b97874f4e0deafd685453b3ce9865e65fafe7561 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -71,7 +71,7 @@ public:
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index b7d18e6443404730bbcb73cf7e6da97b8b3e6a7c..7534b66951cc9d8074d0af7742ba5165013431f5 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -25,6 +25,12 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Gather_OpImpl : public OperatorImpl {
+public:
+    Gather_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
 enum class GatherAttr { Indices, GatheredShape, Axis };
 
 class Gather_Op : public OperatorTensor,
@@ -46,7 +52,9 @@ public:
                 attr<GatherAttr::Indices>(indices),
                 attr<GatherAttr::GatheredShape>(gatheredShape),
                 attr<GatherAttr::Axis>(axis))
-    {}
+    {
+        mImpl = std::make_shared<Gather_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -56,10 +64,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Gather_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Gather_OpImpl>(*this);
         }
     }
 
@@ -71,7 +80,7 @@ public:
         return std::make_shared<Gather_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index e7d60285b4d45826f1d73635d54f4532b4fb1598..f0b7e92d708dfef65eea0ec7649ccc8716533679 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -31,13 +31,13 @@ class GenericOperator_Op
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
-    ComputeDimsFunc mComputeOutputDims;
+    ComputeDimsFunc mForwardDims;
 
 public:
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
         : OperatorTensor(type, nbData, nbParam, nbOut)
     {
-        mImpl = std::make_shared<OperatorImpl>(*this, "");
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
     /**
@@ -61,18 +61,18 @@ public:
     }
 
 public:
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
-    bool outputDimsForwarded() const override final;
+    bool dimsForwarded() const override final;
 
     void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); }
     void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); }
 
-    // Helper functions that can be used with setComputeOutputDims():
+    // Helper functions that can be used with setForwardDims():
     static const ComputeDimsFunc Identity;
     static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs);
-    inline void setComputeOutputDims(ComputeDimsFunc func) {
-        mComputeOutputDims = func;
+    inline void setForwardDims(ComputeDimsFunc func) {
+        mForwardDims = func;
     }
 };
 
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 12c8eb02d9488edeb760b6a063cfac5f8257db18..74529a0ba9481bf6280df8d3ce496f67635a5aef 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -52,7 +52,7 @@ public:
     return std::make_shared<GlobalAveragePooling_Op>(*this);
   }
 
-  void computeOutputDims() override final;
+  bool forwardDims(bool allowDataDependency = false) override final;
 
   void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 27432bc5bb251003e9e93261593e12c2fa704f3d..367aa4e2d68fb1095b1e3b3be76f6ab59439e47f 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -42,7 +42,7 @@ public:
     Identity_Op()
         : OperatorTensor(Type, 1, 0, 1)
     {
-        mImpl = std::make_shared<OperatorImpl>(*this, "");
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
     /**
@@ -63,7 +63,7 @@ public:
         return std::make_shared<Identity_Op>(*this);
     }
 
-    void computeOutputDims() override final {} // Do nothing
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
 
     /**
      * @brief Check if output dimensions have been computed.
@@ -73,34 +73,15 @@ public:
      * @return true Input has dimensions.
      * @return false Input has no dimensions or is a nullptr.
      */
-    bool outputDimsForwarded() const override final {
+    bool dimsForwarded() const override final {
         return mInputs[0] ? !mInputs[0]->empty() : false;
     }
 
 
-    void forward() override final { runHooks(); }
+    void forward() override final;
 
     void backward() override final { }
 
-    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override final {
-        AIDGE_ASSERT(data->type() == "Tensor", "{} Operator only accepts Tensors as outputs", type());
-        AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs());
-        *mInputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data);
-    }
-
-    void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override final {
-        AIDGE_ASSERT(data->type() == "Tensor", "{} Operator only accepts Tensors as inputs", type());
-        AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs());
-        *mInputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
-    }
-
-    const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const override final {
-        AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs());
-        if (mInputs[outputIdx] == nullptr){
-            return mOutputs[outputIdx]; // Input is not initialized with empty tensor
-        }
-        return mInputs[outputIdx]; // Identity, so Output is Input
-    }
     void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
         // setBackend do nothing, Identity node has no backend it just pass the same Tensor
     }
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 43bd8b1654206df15cd869cf2d37a216fcc4a733..580d720e617e5b20c0acc7ce5e7f200fe5b25606 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -64,7 +64,7 @@ public:
      * @note - Second input is 1-D: it is promoted to a matrix by appending a 1 to its
      * dimensions (D) -> (D,1). The appended 1 is removed after computation.
      */
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 5b09aa02cd0665172a9ae69549d8d9311e10d024..8aff1582604a9e23e248e7c01521567483c793ad 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -84,7 +84,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
         if (!getInput(0)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
         }
@@ -108,7 +108,9 @@ public:
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
+            return true;
         }
+        return false;
     }
 
 
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 7de34563adcaabd63ab036232d4d7b6539fd11eb..6b0ace2eb09fde069f8b9b104f92fc33811c25aa 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -25,6 +25,15 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Memorize_OpImpl : public OperatorImpl {
+public:
+    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    void updateConsummerProducer() override;
+    void forward() override;
+};
+
 enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
@@ -73,8 +82,8 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
-    void computeOutputDims() override;
-    bool outputDimsForwarded() const override;
+    bool forwardDims(bool allowDataDependency = false) override final;
+    bool dimsForwarded() const override;
     void updateConsummerProducer() override;
     void forward() override;
 
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 5ac9cf3c92b1951407e4c1892b1a8dc70a724013..c677da0f2e34a299ddec6ee85f5a84616206193d 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -81,7 +81,7 @@ public:
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    void computeOutputDims() override final {
+    bool forwardDims(bool allowDataDependency = false) override final {
         // Check first that all required inputs are available, otherwise
         // mGraph->forwardDims() will fail!
         bool forwarded = true;
@@ -91,8 +91,9 @@ public:
 
         if (forwarded) {
             // Forward dims of micro-graph
-            mGraph->forwardDims();
+            return mGraph->forwardDims({}, allowDataDependency);
         }
+        return false;
     }
 
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index 3652cf9697c6bcfea4befe4cdcdf5b9efff8b70c..e9bcaa871619828a50dcd407d39744e7983fe2c4 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -24,13 +24,20 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Move_OpImpl : public OperatorImpl {
+public:
+    Move_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 class Move_Op : public OperatorTensor,
     public Registrable<Move_Op, std::tuple<std::string, std::string>, std::unique_ptr<OperatorImpl>(const Move_Op&)> {
 public:
     static const std::string Type;
 
-    Move_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Move_Op() : OperatorTensor(Type, 1, 0, 1) {
+        mImpl = std::make_shared<Move_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -39,7 +46,12 @@ public:
     Move_Op(const Move_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Move_Op>::create({mInputs[0]->getImpl()->backend(), mOutputs[0]->getImpl()->backend()})(*this) : nullptr;
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
+        }
+        else {
+            mImpl = std::make_shared<Move_OpImpl>(*this);
+        }
     }
 
     /**
@@ -50,14 +62,7 @@ public:
         return std::make_shared<Move_Op>(*this);
     }
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        if (mInputs[0]->getImpl() && Registrar<Move_Op>::exists({mInputs[0]->getImpl()->backend(), name})) {
-            mImpl = Registrar<Move_Op>::create({mInputs[0]->getImpl()->backend(), name})(*this);
-        }
-        mOutputs[0]->setBackend(name, device);
-    }
-
-    void forward() override;
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index cc9fba59431356a132330e453288f2f6e7141178..f53a38a82a6771e416435222137e72366f5f69f3 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -57,7 +57,7 @@ public:
         return std::make_shared<Mul_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index adf45c2d8311112fa145097ee98f46d120bd41ff..6086c5145eb39cee081468ba91473dc983cfa35f 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -80,11 +80,13 @@ public:
      * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
      */
     virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
-    virtual void computeOutputDims();
-    virtual bool outputDimsForwarded() const;
+    virtual bool forwardDims(bool allowDataDependency = false);
+    virtual bool dimsForwarded() const;
     ///////////////////////////////////////////////////
 
     virtual void setDataType(const DataType& dataType) const override;
+
+    virtual void forward() override;
 };
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index dce2a6e9e5ea9e0c5fe9a841c587c1f7bbe36fc7..a4e4ebdce801971de118ca8a263999046a13777d 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -74,7 +74,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
         bool associated = true;
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
             if (!getInput(i)) {
@@ -95,6 +95,8 @@ public:
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
         }
+
+        return associated;
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 9109ccaeb8bc648fe74510216fad93299740b9bf..2219f30ec9db7acf55491882a78e7a1ed2931cf0 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -24,6 +24,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Pop_OpImpl : public OperatorImpl {
+public:
+    Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+    void forward() override;
+};
+
 enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
@@ -39,7 +46,9 @@ public:
     Pop_Op()
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<PopAttr::ForwardStep>(0))
-    {}
+    {
+        mImpl = std::make_shared<Pop_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -49,10 +58,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Pop_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Pop_OpImpl>(*this);
         }
     }
 
@@ -66,7 +76,7 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
     void updateConsummerProducer() override;
     void forward() override;
 
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index f2becdc60ceb44c19e341496f71e09f061cea55f..08c4de2a254dd267eda4040b54108f93a0c2d922 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -53,7 +53,7 @@ public:
         return std::make_shared<Pow_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1e5a3940ba22c659121e76e1855353168d68441a..7e9072857dae8fa3137065e5c47cc11d88d37efe 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -47,7 +47,7 @@ public:
           Attributes_(attr<ProdAttr::Constant>(constant))
     {
         mOutputs[0]->resize(dims);
-        mImpl = std::make_shared<OperatorImpl>(*this, "");
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
 
     /**
@@ -86,9 +86,9 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
-    void computeOutputDims() noexcept override final {}
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
-    inline bool outputDimsForwarded() const noexcept override final { return true; }
+    inline bool dimsForwarded() const noexcept override final { return true; }
 
 
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
@@ -102,9 +102,8 @@ public:
         return {"data_output"};
     }
 
-    void forward() override final {
-        fmt::print("Basic Producer forward() function.\n");
-    }
+    void forward() override final;
+
     void backward() override final {
         fmt::print("Basic Producer backward() function.\n");
     }
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index ab27e4e0233052f7cc155ed0375175a27d3edcf5..ff8d8b0696aafdab48cd37d049fa0473078d7ea6 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -69,7 +69,7 @@ class ReduceMean_Op : public OperatorTensor,
         return std::make_shared<ReduceMean_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 060029bb87ea142728056b3817b8162d566cb458..49ddfc4d76a0602c58c0c768b04ed4b4202f028d 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -23,6 +23,11 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Reshape_OpImpl : public OperatorImpl {
+public:
+    Reshape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 enum class ReshapeAttr { Shape };
 
@@ -42,7 +47,9 @@ public:
     Reshape_Op(const std::vector<std::int64_t>& shape)
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<ReshapeAttr::Shape>(shape))
-    {}
+    {
+        mImpl = std::make_shared<Reshape_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -52,10 +59,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Reshape_OpImpl>(*this);
         }
     }
 
@@ -67,7 +75,7 @@ public:
         return std::make_shared<Reshape_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index f68aa17f480038d8ff7850577c438cfdc6704d59..757e08fe97dd1cc572c08ac7c2b454daa234bdc1 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -24,6 +24,12 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Slice_OpImpl : public OperatorImpl {
+public:
+    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
 enum class SliceAttr { Starts, Ends, Axes };
 
 class Slice_Op
@@ -44,7 +50,9 @@ public:
           Attributes_(attr<SliceAttr::Starts>(starts),
                       attr<SliceAttr::Ends>(ends),
                       attr<SliceAttr::Axes>(axes))
-    {}
+    {
+        mImpl = std::make_shared<Slice_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
@@ -55,10 +63,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
+        if (!op.backend().empty()) {
             SET_IMPL_MACRO(Slice_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
+        }
+        else {
+            mImpl = std::make_shared<Slice_OpImpl>(*this);
         }
     }
 
@@ -69,12 +78,9 @@ public:
      */
     std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Slice_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index fbcebcc9f62c23e9c60b5dff6f0d41c10d8b8717..e5d8442851c35e9232fdd77d862fb48b71c76f1f 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -57,7 +57,7 @@ public:
         return std::make_shared<Sub_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 1beb5781b9262669cd2acb6ce4ef3aae85843573..16ac2794a283d817f6a4e1586349e55ec626167e 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -26,40 +26,47 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Transpose_OpImpl : public OperatorImpl {
+public:
+    Transpose_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
 enum class TransposeAttr { OutputDimsOrder };
 
-template <DimIdx_t DIM>
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
-                public StaticAttributes<TransposeAttr,
-                                       std::array<DimSize_t, DIM>> {
+                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)>,
+                public StaticAttributes<TransposeAttr, std::vector<DimSize_t>> {
 
    public:
     static const std::string Type;
 
     Transpose_Op() = delete;
 
-    using Attributes_ = StaticAttributes<TransposeAttr,
-                                             std::array<DimSize_t, DIM>>;
+    using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
     template <TransposeAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr Transpose_Op(const std::array<DimSize_t, DIM> &output_dims_order)
+    Transpose_Op(const std::vector<DimSize_t> &output_dims_order)
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order)) { }
+          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order))
+    {
+        mImpl = std::make_shared<Transpose_OpImpl>(*this);
+    }
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Transpose_Op(const Transpose_Op<DIM>& op)
+    Transpose_Op(const Transpose_Op& op)
         : OperatorTensor(op),
           Attributes_(op)
     {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.backend());
-        }else{
-            mImpl = nullptr;
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
+        }
+        else {
+            mImpl = std::make_shared<Transpose_OpImpl>(*this);
         }
     }
 
@@ -68,25 +75,12 @@ class Transpose_Op : public OperatorTensor,
      * @see Operator::Transpose_Op
      */
     std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Transpose_Op<DIM>>(*this);
+        return std::make_shared<Transpose_Op>(*this);
     }
 
-    void computeOutputDims() override final {
-        if (!getInput(0)->empty()) {
-            auto attr = (this)->getStaticAttributes();
-            const std::array<DimSize_t, DIM>& outDimsOrder = static_cast<const std::array<DimSize_t, DIM>&>(std::get<0>(attr));
-            std::vector<DimSize_t> outputDims;
-            for (std::size_t i = 0; i < DIM; ++i) {
-                outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
-            }
-            mOutputs[0]->resize(outputDims);
-        }
-    }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Transpose_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -96,26 +90,10 @@ class Transpose_Op : public OperatorTensor,
     }
 };
 
-template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Transpose(const std::array<DimSize_t, DIM> &output_dims_order,
+inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &output_dims_order,
                                            const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
-    return std::make_shared<Node>(std::make_shared<Transpose_Op<static_cast<DimIdx_t>(DIM)>>(output_dims_order), name);
-}
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
-template <DimSize_t DIM>
-inline std::shared_ptr<Node> Transpose(
-    DimSize_t const (&output_dims_order)[DIM],
-    const std::string& name = "") {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
-    return Transpose(to_array(output_dims_order), name);
+    return std::make_shared<Node>(std::make_shared<Transpose_Op>(output_dims_order), name);
 }
-
-template <DimIdx_t DIM>
-const std::string Transpose_Op<DIM>::Type = "Transpose";
-
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index a6d1d7a9eb5d88dedaf73564847b0f4fbd797c43..b0acdaff7cb75afec78f0564fb95c98f2b32f47b 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -129,16 +129,16 @@ void declare_registrable(py::module& m, const std::string& class_name){
 *   cyril.moineau@cea.fr
 */
 #ifdef PYBIND
-#define SET_IMPL_MACRO(T_Op, op, backend_name) \
+#define SET_IMPL_MACRO(T_Op, op, ...) \
     if(Py_IsInitialized()) { \
         auto obj = py::cast(&(op)); \
-        (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \
+        (op).setImpl(Registrar<T_Op>::create(__VA_ARGS__)(op)); \
     } else { \
-        (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \
+        (op).setImpl(Registrar<T_Op>::create(__VA_ARGS__)(op)); \
     }
 #else
-#define SET_IMPL_MACRO(T_Op, op, backend_name)                   \
-    (op).setImpl(Registrar<T_Op>::create(backend_name)(op));
+#define SET_IMPL_MACRO(T_Op, op, ...)                   \
+    (op).setImpl(Registrar<T_Op>::create(__VA_ARGS__)(op));
 #endif
 
 }
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index d3ea0dd18b740395165002ce538b6de6b82a2df8..1000374454020625aada7f2043893b229deec833 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -117,7 +117,7 @@ void init_GraphView(py::module& m) {
           .def("clone", &GraphView::clone)
           .def("get_nodes", &GraphView::getNodes)
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
-          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
+          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>(), py::arg("allow_data_dependency") = false)
           .def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 31ee946fc99df40133ff04965c762f9ddae0d131..897cd359a4b368dc599f37136ade3508b5ec5a76 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -25,7 +25,7 @@ void init_GenericOperator(py::module& m) {
     py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
     .def_readonly_static("identity", &GenericOperator_Op::Identity)
-    .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
+    .def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function"));
 
     // &GenericOperator
     m.def("GenericOperator",
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 4796917fbe34dbf3b7455841c9e3f1c13ca9c64d..e00f70413614a96919c2a068303b3fbc3f6eca8d 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -25,6 +25,7 @@ namespace py = pybind11;
 namespace Aidge {
 void init_Operator(py::module& m){
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
+    .def("backend", &Operator::backend)
     .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
index c56e80a47e1142900ff844e7d9889011dee65060..4d4541ab36468bc6b531e0242888dd70c5afc71f 100644
--- a/python_binding/operator/pybind_OperatorTensor.cpp
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -30,8 +30,8 @@ void init_OperatorTensor(py::module& m){
 
     .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
     .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data"))
-    .def("compute_output_dims", &OperatorTensor::computeOutputDims)
-    .def("output_dims_forwarded", &OperatorTensor::outputDimsForwarded)
+    .def("forward_dims", &OperatorTensor::forwardDims, py::arg("allow_data_dependency") = false)
+    .def("dims_forwarded", &OperatorTensor::dimsForwarded)
     ;
 }
 }
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index f6e2f2225e4858d3385c5d0140a863e7e7705652..63b22608d1737f9a59caffd4517fc0e9cfc4dd91 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -25,32 +25,19 @@
 namespace py = pybind11;
 namespace Aidge {
 
-template <DimIdx_t DIM>
 void declare_Transpose(py::module &m) {
-  const std::string pyClassName("TransposeOp" + std::to_string(DIM) + "D");
-  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
-  .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
-  .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
-
-  declare_registrable<Transpose_Op<DIM>>(m, pyClassName);
-
-  m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
-                                                                  const std::string& name) {
-        AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM);
-        return Transpose<DIM>(to_array<DIM>(output_dims_order.begin()), name);
-    }, py::arg("output_dims_order"),
-       py::arg("name") = "");
-
+  const std::string pyClassName("TransposeOp");
+  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, Attributes, OperatorTensor>(
+    m, "TransposeOp", py::multiple_inheritance())
+  .def("get_inputs_name", &Transpose_Op::getInputsName)
+  .def("get_outputs_name", &Transpose_Op::getOutputsName)
+  .def("attributes_name", &Transpose_Op::staticGetAttrsName);
+  declare_registrable<Transpose_Op>(m, pyClassName);
+  m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
 }
 
 void init_Transpose(py::module &m) {
-  declare_Transpose<2>(m);
-  declare_Transpose<3>(m);
-  declare_Transpose<4>(m);
-  declare_Transpose<5>(m);
-  declare_Transpose<6>(m);
+  declare_Transpose(m);
 
 }
 } // namespace Aidge
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index dcd7a06ef8560ad6d4a572cd823e2f9dc357b73c..df2177cf6910a3c40ef269d18bf148d60b5faa66 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -391,7 +391,7 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
     forwardDims(dims);
 }
 
-void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
+bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims, bool allowDataDependency) {
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
@@ -406,19 +406,14 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
     // Ensure every node in the graph is correctly connected
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
         for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
-            // assess if the input was not already set and is a Tensor then link it to parent output
             std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
             if (inputI.first) {
-                if ( std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i)) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
-                    if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
-                        // assert provided Data is of "Tensor" type
-                        nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second));
-                    }
-                    else {
-                        AIDGE_ASSERT(false, "Non-tensor entries not handled yet, for node {} (of type {}).", nodePtr->name(), nodePtr->type());
-                    }
-                }
+                // Check that associated Data are properly connected...
+                AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) == inputI.first->getOperator()->getRawOutput(inputI.second),
+                  "Input#{} for node {} ({}) is not properly connected to output#{} of node {} ({}): Data or Tensor mismatch!",
+                    i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
             } else {
+                // Input is missing
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i)
                     && !std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
@@ -436,8 +431,8 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
               const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
               // Recompute everytime, even if it was already computed in a
               // previous call of forwardDims(), as the graph may have changed!
-              op->computeOutputDims();
-              if (!op->outputDimsForwarded()) {
+              op->forwardDims(allowDataDependency);
+              if (!op->dimsForwarded()) {
                   nextList.insert(nodePtr);
               }
             }
@@ -450,12 +445,16 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
             std::transform(nextList.begin(), nextList.end(),
                 std::back_inserter(nodesName),
                 [](auto val){ return val->name() + " (" + val->type() + ")"; });
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unable to forward dimensions (circular dependency and/or wrong dimensions?). Unable to compute output dims for nodes {}.", nodesName);
+
+            Log::warn("Unable to forward dimensions (circular dependency and/or wrong dimensions and/or data dependent dimension?). Unable to compute output dims for nodes {}.", nodesName);
+            return false;
         }
 
         listNodes.swap(nextList);
     }
     while (!listNodes.empty());
+
+    return listNodes.empty();
 }
 
 void Aidge::GraphView::setBackend(const std::string &backend, const DeviceIdx_t device) const {
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 8cc03907fd0e2c2a13eaacad41b5c1e21fde06c2..6bafb3b7905ae36e23af32f8d60be33a4ba178bf 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -32,7 +32,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
     }
 }
 
-void Aidge::Add_Op::computeOutputDims() {
+bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     bool associated = (nbInputs() > 0); // do not compute anything if no input
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -70,6 +70,8 @@ void Aidge::Add_Op::computeOutputDims() {
         }
         mOutputs[0]->resize(outDims);
     }
+
+    return associated;
 }
 
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index acb097668bce0ff6f335f577faed503e086db79f..07123bc88aa1da22bfa98166d6a01af8d66be98d 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -36,7 +36,7 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): Operator
 }
 
 template <Aidge::DimIdx_t DIM>
-void Aidge::AvgPooling_Op<DIM>::computeOutputDims() {
+bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
@@ -54,7 +54,9 @@ void Aidge::AvgPooling_Op<DIM>::computeOutputDims() {
                                         static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
         }
         getOutput(0)->resize(outputDims);
+        return true;
     }
+    return false;
 }
 
 
@@ -69,7 +71,7 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
     if (firstEltDims.size() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
     }
-    if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
         // Offset
         std::vector<DimSize_t> inputIdxDims = firstEltDims;
 
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index b14f0238809b9ec9b6b186d093ecf3b1554865cb..14bf65763c024ffe28d30654a49c9630737a12fd 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -36,7 +36,7 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTen
 }
 
 template <Aidge::DimIdx_t DIM>
-void Aidge::BatchNorm_Op<DIM>::computeOutputDims() {
+bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     bool associated = true;
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -53,6 +53,7 @@ void Aidge::BatchNorm_Op<DIM>::computeOutputDims() {
         }
         mOutputs[0]->resize(getInput(0)->dims());
     }
+    return associated;
 }
 
 template <Aidge::DimIdx_t DIM>
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 4f1ac55898b11668ba1c2f5299f8e1ca1d4e5df1..f1c8e25e17c80d58d444a1ddddbaa428b2fc4c41 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -20,22 +20,19 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Cast_Op::Type = "Cast";
-
-void Aidge::Cast_Op::forward() {
-    if (mImpl) {
-        mImpl->forward();
-    }
-    else {
-        mOutputs[0]->copyCast(*(mInputs[0]));
-    }
-
-    runHooks();
+void Aidge::Cast_OpImpl::forward() {
+    const Cast_Op& op = dynamic_cast<const Cast_Op&>(mOp);
+    op.getOutput(0)->copyCast(*(op.getInput(0)));
 }
 
+const std::string Aidge::Cast_Op::Type = "Cast";
+
 void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Cast_Op>::exists({name})) {
         SET_IMPL_MACRO(Cast_Op, *this, name);
     }
+    else {
+        mImpl = std::make_shared<Cast_OpImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 7df5b6dbf6122da44aed280da0d717232ba42fef..ee06ce69b135e11fe3ed5be8fa9f501debb6acd5 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -18,9 +18,48 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+void Aidge::Concat_OpImpl::forward() {
+    const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
+    const DimSize_t axis = op.template getAttr<DimSize_t>("Axis");
+
+    assert(op.getInput(0) && "missing input in Concat operator");
+    DataType datatypeFirstInput = op.getInput(0)->dataType();
+    for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
+        assert(op.getInput(i) && "missing input in Concat operator");
+        assert(op.getInput(i)->dataType() == datatypeFirstInput);
+    }
+
+    DimSize_t outputAxisValue = 0;
+    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
+        outputAxisValue += op.getInput(i)->dims()[axis];
+    }
+
+    DimSize_t prodDimLower = 1;
+    for (DimIdx_t i = 0; i < axis; ++i) {
+        prodDimLower *= op.getInput(0)->dims()[i];
+    }
+    DimSize_t prodDimHigher = 1;
+    for (DimIdx_t i = axis + 1; static_cast<std::size_t>(i) < op.getInput(0)->dims().size();
+         ++i) {
+        prodDimHigher *= op.getInput(0)->dims()[i];
+    }
+
+    std::size_t oIndexStart = 0;
+    // std::size_t oIndex = 0;
+    for (std::size_t inputId = 0; inputId < op.nbInputs(); ++inputId) {
+        // oIndex = oIndexStart;
+        const DimSize_t iOffset = prodDimHigher*op.getInput(inputId)->dims()[axis];
+        for (std::size_t iIndex = 0, oIndex = oIndexStart; iIndex < prodDimLower; ++iIndex) {
+            op.getOutput(0)->getImpl()->copy(op.getInput(inputId)->getImpl()->rawPtr(iIndex*iOffset), iOffset, oIndex);
+            oIndex += prodDimHigher*outputAxisValue;
+        }
+        oIndexStart += op.getInput(inputId)->dims()[axis]*prodDimHigher;
+    }
+}
+
 const std::string Aidge::Concat_Op::Type = "Concat";
 
-void Aidge::Concat_Op::computeOutputDims() {
+bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     // Every input is non-empty with the same number of dimensions
     bool associated = (getInput(0) != nullptr);
     associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
@@ -49,9 +88,16 @@ void Aidge::Concat_Op::computeOutputDims() {
     if (associated) {
         getOutput(0)->resize(outputDims);
     }
+
+    return associated;
 }
 
 void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
-    SET_IMPL_MACRO(Concat_Op, *this, name);
+    if (Registrar<Concat_Op>::exists({name})) {
+        SET_IMPL_MACRO(Concat_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Concat_OpImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 23a676956d6701f876b50d1fda51fb4684ae1038..813ab774b11cd72f440d28f61843500686d7df2d 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -22,7 +22,7 @@
 
 const std::string Aidge::Div_Op::Type = "Div";
 
-void Aidge::Div_Op::computeOutputDims() {
+bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -50,7 +50,10 @@ void Aidge::Div_Op::computeOutputDims() {
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 9865d64f6a0b87be96244bc4b39c91b605f02b6f..ba7e29e7b6543a570ceede6158bd306286037c10 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -36,7 +36,7 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh
         mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
 }
 
-void Aidge::FC_Op::computeOutputDims() {
+bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
     bool associated = true;
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
         if (!getInput(i)) {
@@ -48,6 +48,8 @@ void Aidge::FC_Op::computeOutputDims() {
         // <batch, OutChannels>
         mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
     }
+
+    return associated;
 }
 
 void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 259e6513994970eb7e677f44c981888388825fae..7b0945271660be8f309024f46c258e6a7e2193e5 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -20,10 +20,39 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+void Aidge::Gather_OpImpl::forward() {
+    const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
+    const auto axis = op.template getAttr<std::int64_t>("Axis");
+
+    const std::size_t axisIdx = axis>=0 ?
+                                axis :
+                                static_cast<std::size_t>(axis) + op.getInput(0)->dims().size();
+
+    std::size_t postAxisElems = 1;
+    for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
+        postAxisElems *= op.getInput(0)->dims()[i];
+    }
+    std::size_t preAxisElems = 1;
+    for (std::size_t i = 0; i < axisIdx; ++i) {
+        preAxisElems *= op.getInput(0)->dims()[i];
+    }
+
+    const auto indices = op.template getAttr<std::vector<std::int64_t>>("Indices");
+    std::size_t outputOffset = 0;
+    for (std::size_t i=0; i<preAxisElems; ++i)
+    {
+        for(std::size_t j=0; j<indices.size(); ++j)
+        {
+            const std::size_t idx = indices[j] >= 0 ? indices[j] : static_cast<std::size_t>(indices[j]) + op.getInput(0)->dims()[axisIdx];
+            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
+            outputOffset += postAxisElems;
+        }
+    }
+}
 
 const std::string Aidge::Gather_Op::Type = "Gather";
 
-void Aidge::Gather_Op::computeOutputDims() {
+bool Aidge::Gather_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
@@ -46,10 +75,18 @@ void Aidge::Gather_Op::computeOutputDims() {
         }
 
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Gather_Op, *this, name);
+    if (Registrar<Gather_Op>::exists({name})) {
+        SET_IMPL_MACRO(Gather_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Gather_OpImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index 3eae49b69ce639529d49dd1c0d241f12ece5d98b..fdf3036fe7eeccb2dfd9e21faf834e27854e45f3 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -25,8 +25,8 @@ const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Inpu
     return [nbOutputs, inputIdx](const std::vector<std::vector<std::size_t>>& inputsDims) { return std::vector<std::vector<std::size_t>>(nbOutputs, inputsDims[inputIdx]); };
 }
 
-void Aidge::GenericOperator_Op::computeOutputDims() {
-    if (mComputeOutputDims) {
+bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (mForwardDims) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>());
         for (std::size_t i = 0; i < nbInputs(); ++i) {
             if (getInput(i)) {
@@ -34,23 +34,25 @@ void Aidge::GenericOperator_Op::computeOutputDims() {
             }
         }
 
-        const auto& outputsDims = mComputeOutputDims(inputsDims);
+        const auto& outputsDims = mForwardDims(inputsDims);
         AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The provided ComputeDimsFunc function returns the wrong number of outputs");
         for (std::size_t i = 0; i < nbOutputs(); ++i) {
             mOutputs[i]->resize(outputsDims[i]);
         }
+        return true;
     }
     else {
-        AIDGE_ASSERT(false, "Cannot compute output dim of a GenericOperator");
+        Log::warn("GenericOperator: cannot compute output dims, no ComputeDimsFunc function provided.");
+        return false;
     }
 }
 
-bool Aidge::GenericOperator_Op::outputDimsForwarded() const {
-    if (mComputeOutputDims) {
+bool Aidge::GenericOperator_Op::dimsForwarded() const {
+    if (mForwardDims) {
         return !(mOutputs[0]->empty());
     }
     else {
-        AIDGE_ASSERT(false, "GenericOperator cannot forward dims");
+        Log::notice("GenericOperator: output dims not forwarded, no ComputeDimsFunc function provided.");
         return false;
     }
-}
\ No newline at end of file
+}
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index 618ccc06f40da4b1f1c491487fd978da768652e4..b09426f8f835eda5600b630488ef18c5b08ba32a 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -21,18 +21,13 @@
 
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
-void Aidge::GlobalAveragePooling_Op::computeOutputDims() {
+bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
   // error checking
   if (!getInput(0)) {
     AIDGE_THROW_OR_ABORT(std::runtime_error,
                          "GlobalAveragePooling : The input was not connected");
   }
-  // necessary bc forward dims sometimes passes with an empty vector before
-  // doing another pass
-  else if (getInput(0)->empty()) {
-    return;
-  // computation
-  } else {
+  else if (!getInput(0)->empty()) {
     AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
                  "GlobalAveragePooling :  needs at least a 3 dimensions input, "
                  "number of input dim : {}",
@@ -43,7 +38,10 @@ void Aidge::GlobalAveragePooling_Op::computeOutputDims() {
     const std::vector<DimSize_t> out_dims{getInput(0)->dims().at(0),
                                           getInput(0)->dims().at(1)};
     mOutputs[0]->resize(out_dims);
+    return true;
   }
+
+  return false;
 }
 
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index f57906dd4f3564b52cde16236bda87370e8f86d7..2b8107bfc77ef70b33a97032d350a42ec5f3f466 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -13,4 +13,10 @@
 
 #include "aidge/operator/Identity.hpp"
 
-const std::string Aidge::Identity_Op::Type = "Identity";
\ No newline at end of file
+const std::string Aidge::Identity_Op::Type = "Identity";
+
+void Aidge::Identity_Op::forward() {
+    // Perform a shallow copy
+    *(mOutputs[0]) = *(mInputs[0]);
+    runHooks();
+}
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 56899875338d487294163aa018e0d98b5f7a5269..8f7548155cde4c7187f7a7fe96a44c4accd2c302 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -20,13 +20,14 @@
 
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
-void Aidge::MatMul_Op::computeOutputDims() {
+bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
     }
     if (getInput(0)->empty() && getInput(1)->empty()) {
         // both inputs are scalar
         mOutputs[0]->resize({});
+        return true;
     }
     else if (!getInput(0)->empty() && !getInput(1)->empty())
     {
@@ -69,7 +70,10 @@ void Aidge::MatMul_Op::computeOutputDims() {
             outDims.push_back(dims1[dims_size-1]);
 
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 6e54a234d2fc78c8e8e9a43a7528709c8e51adc4..e08b5f1054f07a9dcc1722d219ebce022f994d61 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -20,9 +20,74 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
+    Aidge::IOIndex_t inputIdx) const
+{
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
+
+    if (scheduleStep == 0 && inputIdx == 0) {
+        // No data input is required for the initial step.
+        // Initialization data is required however.
+        return Elts_t::NoneElts();
+    }
+    else if (scheduleStep > 0 && inputIdx == 1) {
+        // No initialization data is required after the initial step.
+        return Elts_t::NoneElts();
+    }
+    else {
+        return OperatorImpl::getNbRequiredData(inputIdx);
+    }
+}
+
+Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
+    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
+
+    if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) {
+        return Elts_t::NoneElts();
+    }
+    else {
+        return Elts_t::DataElts(op.getOutput(outputIdx)->size());
+    }
+}
+
+void Aidge::Memorize_OpImpl::updateConsummerProducer() {
+    OperatorImpl::updateConsummerProducer();
+
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
+    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
+    AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded");
+}
+
+void Aidge::Memorize_OpImpl::forward() {
+    const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
+    const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>();
+    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
+    AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded");
+
+    if (forwardStep == 0) {
+        op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
+    }
+    else {
+        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+    }
+}
+
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
-void Aidge::Memorize_Op::computeOutputDims() {
+void Aidge::Memorize_Op::updateConsummerProducer() {
+    Operator::updateConsummerProducer();
+    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
+    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+}
+
+bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
     for (size_t i = 0; i < 2; ++i) {
         if (!getInput(i)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
@@ -34,19 +99,18 @@ void Aidge::Memorize_Op::computeOutputDims() {
     if (!(getInput(0)->empty())) {
         const auto expectedDims =  getInput(0)->dims();
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
     else if (!(getInput(1)->empty())) {
         const auto expectedDims =  getInput(1)->dims();
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
-}
 
-void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    mImpl = Registrar<Memorize_Op>::create({name})(*this);
-    mOutputs[0]->setBackend(name, device);
+    return false;
 }
 
-bool Aidge::Memorize_Op::outputDimsForwarded() const {
+bool Aidge::Memorize_Op::dimsForwarded() const {
     // Only check the output dims
     bool forwarded = true;
     // check outputs have been filled
@@ -56,10 +120,14 @@ bool Aidge::Memorize_Op::outputDimsForwarded() const {
     return forwarded;
 }
 
-void Aidge::Memorize_Op::updateConsummerProducer() {
-    Operator::updateConsummerProducer();
-    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
-    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Memorize_Op>::exists({name})){
+        SET_IMPL_MACRO(Memorize_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Memorize_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
 }
 
 void Aidge::Memorize_Op::forward() {
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index d8776e32fca909663bafe3fae3ebf9f5616c69c9..0f635ea655676e488343bb55d9de6423a997af7d 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -12,15 +12,19 @@
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Move.hpp"
 
+void Aidge::Move_OpImpl::forward() {
+    const Move_Op& op = dynamic_cast<const Move_Op&>(mOp);
+    op.getOutput(0)->copyFrom(*(op.getInput(0)));
+}
+
 const std::string Aidge::Move_Op::Type = "Move";
 
-void Aidge::Move_Op::forward() {
-    if (mImpl) {
-        mImpl->forward();
+void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Move_Op>::exists({mInputs[0]->getImpl()->backend(), name})) {
+        SET_IMPL_MACRO(Move_Op, *this, {mInputs[0]->getImpl()->backend(), name});
     }
     else {
-        mOutputs[0]->copyFrom(*(mInputs[0]));
+        mImpl = std::make_shared<Move_OpImpl>(*this);
     }
-
-    runHooks();
+    mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index c40d7e75ee449be99aa3c96ab1b260f549f6322a..5a25e4dd447f44220dbe4124e63f567520ad8d1e 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -23,7 +23,7 @@
 
 const std::string Aidge::Mul_Op::Type = "Mul";
 
-void Aidge::Mul_Op::computeOutputDims() {
+bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -51,10 +51,13 @@ void Aidge::Mul_Op::computeOutputDims() {
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
     else if (!getInput(0)->empty() && !getInput(1)->empty()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims());
     }
+
+    return false;
 }
 
 void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index b85c18040ad84a1e9b1ea1f8b475c32260b6587a..2a60f580f3279170a0f1ff417cea96ae7cfa981f 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -119,7 +119,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     if (nbInputs() != nbData()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
     }
-    if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
+    if (!dimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }
     for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
@@ -131,7 +131,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
 }
 
-void Aidge::OperatorTensor::computeOutputDims() {
+bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     bool associated = (nbInputs() > 0); // do not compute anything if no input
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -151,9 +151,11 @@ void Aidge::OperatorTensor::computeOutputDims() {
         }
         mOutputs[0]->resize(expectedDims);
     }
+
+    return associated;
 }
 
-bool Aidge::OperatorTensor::outputDimsForwarded() const {
+bool Aidge::OperatorTensor::dimsForwarded() const {
     bool forwarded = true;
     // check both inputs and outputs have been filled
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -176,4 +178,12 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
         AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
         getInput(i)->setDataType(dataType);
     }
-}
\ No newline at end of file
+}
+
+void Aidge::OperatorTensor::forward() {
+    if (!dimsForwarded()) {
+        forwardDims();
+    }
+
+    Operator::forward();
+}
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 06999e301ce0968b2d9979e47f412c02e59de3ad..18325d80a94f35878ededca839ec809000527c39 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -20,10 +20,24 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+    return Elts_t::DataElts(op.getInput(inputIdx)->size()
+        / op.getInput(inputIdx)->dims()[0]);
+}
+
+void Aidge::Pop_OpImpl::forward() {
+    const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+    assert(op.getInput(0) && "missing input #0");
+    const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>();
+    *op.getOutput(0) = op.getInput(0)->extract({forwardStep});
+}
 
 const std::string Aidge::Pop_Op::Type = "Pop";
 
-void Aidge::Pop_Op::computeOutputDims() {
+bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
@@ -32,7 +46,10 @@ void Aidge::Pop_Op::computeOutputDims() {
         auto inputDims = getInput(0)->dims();
         inputDims.erase(inputDims.begin());
         getOutput(0)->resize(inputDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Pop_Op::updateConsummerProducer() {
@@ -40,12 +57,17 @@ void Aidge::Pop_Op::updateConsummerProducer() {
     this->template getAttr<PopAttr::ForwardStep>() = 0;
 }
 
+void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Pop_Op>::exists({name})){
+        SET_IMPL_MACRO(Pop_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Pop_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
 void Aidge::Pop_Op::forward() {
     Operator::forward();
     ++this->template getAttr<PopAttr::ForwardStep>();
 }
-
-void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Pop_Op, *this, name);
-    mOutputs[0]->setBackend(name, device);
-}
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 30b7fec5593d418d0dcea072fe99f6119f9fb83e..42715516e6804c1a48ef848fbda8f9d596f0e69e 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -22,7 +22,7 @@
 
 const std::string Aidge::Pow_Op::Type = "Pow";
 
-void Aidge::Pow_Op::computeOutputDims() {
+bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -50,7 +50,10 @@ void Aidge::Pow_Op::computeOutputDims() {
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 38bbbc14846f8f4356602b1d3a66058439bb37d0..f384c10138500f454720395e7387c331d67440b6 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -32,28 +32,12 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
       Attributes_(attr<ProdAttr::Constant>(constant))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
-#ifdef PYBIND
-    if(Py_IsInitialized()) {
-        auto obj = py::cast(&(*this));
-        setImpl((mOutputs[0]->hasImpl()) ?
-            (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-            std::make_shared<OperatorImpl>(*this, ""));
-    } else {
-        setImpl((mOutputs[0]->hasImpl()) ?
-            (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-            std::make_shared<OperatorImpl>(*this, ""));
+    if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
+        SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
+    }
+    else {
+        mImpl = std::make_shared<OperatorImpl>(*this);
     }
-#else
-    setImpl((mOutputs[0]->hasImpl()) ?
-                (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                    Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                    std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-                std::make_shared<OperatorImpl>(*this, ""));
-#endif
 }
 
 /**
@@ -66,57 +50,31 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
       Attributes_(op)
 {
     mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
-#ifdef PYBIND
-    if(Py_IsInitialized()) {
-            auto obj = py::cast(&(*this));
-            setImpl((mOutputs[0]->hasImpl()) ?
-                (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                    Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                    std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-                std::make_shared<OperatorImpl>(*this, ""));
-        } else {
-            setImpl((mOutputs[0]->hasImpl()) ?
-                (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                    Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                    std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-                std::make_shared<OperatorImpl>(*this, ""));
-        }
-#else
-    setImpl((mOutputs[0]->hasImpl()) ?
-                (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ?
-                    Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) :
-                    std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) :
-                std::make_shared<OperatorImpl>(*this, ""));
-#endif
-    // if (mOutputs[0]->hasImpl()) {
-        // if (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
-        //     setImpl(Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this));
-        // }
-        // else  {
-        //     mImpl = std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend());
-        // }
-
-    // } else {
-    //     mImpl = nullptr;
-    // }
+    if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
+        SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
+    }
+    else {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
 }
 
 void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-#ifdef PYBIND
-    if(Py_IsInitialized()) {
-            auto obj = py::cast(&(*this));
-            setImpl((Registrar<Producer_Op>::exists({name})) ?
-                    Registrar<Producer_Op>::create(name)(*this) :
-                    std::make_shared<OperatorImpl>(*this, ""));
-        } else {
-            setImpl((Registrar<Producer_Op>::exists({name})) ?
-                    Registrar<Producer_Op>::create(name)(*this) :
-                    std::make_shared<OperatorImpl>(*this, ""));
-        }
-#else
-    setImpl((Registrar<Producer_Op>::exists({name})) ?
-        Registrar<Producer_Op>::create(name)(*this) :
-        std::make_shared<OperatorImpl>(*this, ""));
-#endif
+    if (Registrar<Producer_Op>::exists({name})){
+        SET_IMPL_MACRO(Producer_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
+
+void Aidge::Producer_Op::forward() {
+    if (!backend().empty()) {
+        mImpl->forward();
+    }
+    else {
+        fmt::print("Basic Producer forward() function.\n");
+    }
+
+    runHooks();
+}
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 0de676e22ec668a9b41d7d61f184465d431715a2..28e39b6d3387a0371c0505dc0a7b350e83a2bbaf 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -26,34 +26,35 @@
 
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
-void Aidge::ReduceMean_Op::computeOutputDims() {
-        if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+    }
+    if (!getInput(0)->empty()) {
+        // make Axes attribute positive
+        std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+            if (val < 0)
+                val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+        });
+        std::sort(axes.begin(), axes.end());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+            std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
-        if (!getInput(0)->empty()) {
-            // make Axes attribute positive
-            std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
-            std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
-                if (val < 0)
-                    val+=static_cast<std::int32_t>(getInput(0)->nbDims());
-            });
-            std::sort(axes.begin(), axes.end());
-
-            // build output dimensions
-            std::vector<DimSize_t> outDims = getInput(0)->dims();
-            if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
-                std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
-            }
-            else {
-                for (auto it = axes.crbegin(); it != axes.crend(); ++it)
-                    outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
-            }
-
-            // TODO: change {1} for {} when scalar Tensors are better handled.
-            mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
-
+        else {
+            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
         }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
     }
+    return false;
+}
 
 void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(ReduceMean_Op, *this, name);
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 79cfc0659849248bac791ba5b1db25096824e928..ab53c094dac09879c1bec86509463aab2280ca92 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -23,9 +23,14 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+void Aidge::Reshape_OpImpl::forward() {
+    const Reshape_Op& op = dynamic_cast<const Reshape_Op&>(mOp);
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
-void Aidge::Reshape_Op::computeOutputDims() {
+bool Aidge::Reshape_Op::forwardDims(bool /*allowDataDependency*/) {
     // check input has been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
@@ -58,10 +63,18 @@ void Aidge::Reshape_Op::computeOutputDims() {
         }
 
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(Reshape_Op, *this, name);
+    if (Registrar<Reshape_Op>::exists({name})){
+        SET_IMPL_MACRO(Reshape_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Reshape_OpImpl>(*this);
+    }
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 6d2670695b2ffe9acbf09edd3e82f8549a4184f0..97ec0a5171a8f13fee0a93557b6831443f10713a 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -22,9 +22,78 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+void Aidge::Slice_OpImpl::forward() {
+    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
+    const auto inputDims = op.getInput(0)->dims();
+    auto slicedDims = op.getInput(0)->dims();
+
+    std::size_t beginning = 0;
+    DimSize_t nbAxes = op.getAttr<SliceAttr::Axes>().size();
+    for (std::size_t i = 0; i < nbAxes; ++i) {
+        // For each slice operation get the params and cast them to size_t
+        const std::int64_t axis_ = op.getAttr<SliceAttr::Axes>()[i];
+        const std::int64_t start_ = op.getAttr<SliceAttr::Starts>()[i];
+        const std::int64_t end_ = op.getAttr<SliceAttr::Ends>()[i];
+        const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_) + inputDims.size();
+        const std::size_t start = start_ >= 0 ? start_ : start_ + inputDims[axis];
+        const std::size_t end = end_ >= 0 ? end_ : end_ + inputDims[axis];
+        std::size_t stride = 1;
+        for (std::size_t j = inputDims.size() - 1; j > axis; --j) stride *= inputDims[j];
+        beginning += start * stride;
+        const std::size_t sliceLength = end - start + 1;
+        slicedDims[axis] = sliceLength;
+    }
+
+    const std::size_t nbDims = slicedDims.size();
+
+    // for inputDims = {4,5,5,3} & slicedDims = {3,2,2,1}, substractDims = {1,5,5,3}
+    std::vector<std::size_t> substractedDims = std::vector<std::size_t>(nbDims);
+    for (std::size_t i = 0; i < nbDims; ++i) {
+        substractedDims[i] = inputDims[i] - slicedDims[i];
+    }
+
+    // for slicedDims = {3,2,2,1}, prodSlicedDims = {12,4,2,1}
+    std::vector<std::size_t> prodSlicedDims = std::vector<std::size_t>(nbDims);
+    std::vector<std::size_t> prodInputDims = std::vector<std::size_t>(nbDims + 1);
+    prodSlicedDims[nbDims - 1] = slicedDims[nbDims - 1];
+    prodInputDims[nbDims - 1] = inputDims[nbDims - 1];
+    prodInputDims[nbDims] = 1;
+    for (std::size_t i = 2; i <= nbDims; ++i) {
+        prodSlicedDims[nbDims - i] = prodSlicedDims[nbDims - i + 1] * slicedDims[nbDims - i];
+        prodInputDims[nbDims - i] = prodInputDims[nbDims - i + 1] * inputDims[nbDims - i];
+    }
+
+    std::size_t i = beginning;
+    std::size_t size = 0;
+    std::size_t offset = 0;
+    for (std::size_t j = 0; j < prodSlicedDims[0];) {
+        ++size;
+        ++i;
+        ++j;
+        bool newChunk = false;
+        for (std::size_t idx = nbDims - 1; idx > 0; --idx) {
+            if (j % prodSlicedDims[idx] == 0) {
+                i += substractedDims[idx] * prodInputDims[idx + 1];
+                newChunk = true;
+            }
+        }
+
+        if (newChunk) {
+            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(beginning), size, offset);
+            beginning = i;
+            offset += size;
+            size = 0;
+        }
+    }
+
+    if (size > 0) {
+        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(beginning), size, offset);
+    }
+}
+
 const std::string Aidge::Slice_Op::Type = "Slice";
 
-void Aidge::Slice_Op::computeOutputDims() {
+bool Aidge::Slice_Op::forwardDims(bool /*allowDataDependency*/) {
     // check input have been associated
     if (!getInput(0) || (getInput(0)->empty())) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
@@ -50,4 +119,15 @@ void Aidge::Slice_Op::computeOutputDims() {
         outDims[axis] = sliceLength;
     }
     mOutputs[0]->resize(outDims);
+    return true;
+}
+
+void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Slice_Op>::exists({name})){
+        SET_IMPL_MACRO(Slice_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Slice_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 285868d0e32cadf87b87e275f45521c22820a150..50e556ad97a90b7a9868594cebe350d955983fd7 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -24,7 +24,7 @@
 
 const std::string Aidge::Sub_Op::Type = "Sub";
 
-void Aidge::Sub_Op::computeOutputDims() {
+bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -52,7 +52,10 @@ void Aidge::Sub_Op::computeOutputDims() {
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..08c4770e3fb43fe819a924dd963356401c3ce801
--- /dev/null
+++ b/src/operator/Transpose.cpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Transpose.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::Transpose_OpImpl::forward() {
+    const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
+    const auto inputDims = op.getInput(0)->dims();
+    const auto outputDims = op.getOutput(0)->dims();
+
+    std::vector<std::size_t> outStrides(outputDims.size(), 1);
+    for (size_t i = 0; i < outputDims.size(); ++i) {
+        for (size_t j = i+1; j < outputDims.size(); ++j)
+        {
+            outStrides[i] *= outputDims[j];
+        }
+    }
+
+    std::vector<size_t> indices(outputDims.size(), 0);
+    for (size_t i = 0; i < op.getInput(0)->size(); ++i) {
+        size_t idx = 0;
+        // Permute indices based on OutputDimsOrder attr
+        for (int j = outputDims.size() -1; j >=0; --j) {
+            idx += indices[op.getAttr<std::vector<DimSize_t>>(0)[j]] * outStrides[j];
+        }
+        // Copy the value in output
+        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i), 1, idx);
+
+        // Update indices for the next iteration
+        for (int j = outputDims.size() - 1; j >= 0; --j) {
+            if (indices[j] < inputDims[j] - 1) {
+                indices[j]++;
+                break;
+            } else {
+                indices[j] = 0;
+            }
+        }
+    }
+}
+
+const std::string Aidge::Transpose_Op::Type = "Transpose";
+
+bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
+    // check input has been associated
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
+    }
+
+    if (!getInput(0)->empty()) {
+        const auto& outDimsOrder = getAttr<std::vector<DimSize_t>>(0);
+        std::vector<DimSize_t> outputDims;
+        for (std::size_t i = 0; i < outDimsOrder.size(); ++i) {
+            outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
+        }
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+    return false;
+}
+
+void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Transpose_Op>::exists({name})){
+        SET_IMPL_MACRO(Transpose_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Transpose_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index 8e27fea58014b4ec16729f3593dd656026e16826..7959e1b70acab617b9c6f92160c6d501712f5945 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -41,7 +41,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     if (op->nbOutputs() != 1 || op->nbData() > 1) {
         AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
     }
-    if (!op->outputDimsForwarded()) {
+    if (!op->dimsForwarded()) {
         AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling");
     }
     // start by doing a tiling with strict dimensions division
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 437780b959b37e0cf6b5b7796e71c9b931f25bc0..8403686d16da15e7e8ad4616029a241d6197d450 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -648,11 +648,8 @@ TEST_CASE("[GraphView] clone") {
     auto conv1 = Conv(3, 32, {3, 3}, "conv1");
     auto conv2 = Conv(32, 64, {3, 3}, "conv2");
     auto conv3 = Conv(64, 10, {1, 1}, "conv3");
-    auto g1 = std::make_shared<GraphView>("TestGraph");
+    auto g1 = Sequential({conv1, conv2, conv3});
     dataProvider->addChild(conv1, 0);
-    g1->add(conv1);
-    g1->addChild(conv2, conv1, 0);
-    g1->addChild(conv3, conv2, 0);
     g1->save("clone_g1");
 
     SECTION("Check input-output connections") {
diff --git a/unit_tests/operator/Test_ConcatImpl.cpp b/unit_tests/operator/Test_ConcatImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..184c02d5208c99b903cf838784bb14fb65799111
--- /dev/null
+++ b/unit_tests/operator/Test_ConcatImpl.cpp
@@ -0,0 +1,143 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Concat.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
+    SECTION("Concat 1D inputs") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
+        std::shared_ptr<Tensor> input3 = std::make_shared<Tensor>(Array1D<int,4>{{ 7, 8, 9, 10 }});
+        std::shared_ptr<Tensor> input4 = std::make_shared<Tensor>(Array1D<int,5>{{ 11, 12, 13, 14, 15 }});
+        std::shared_ptr<Tensor> input5 = std::make_shared<Tensor>(Array1D<int,6>{{ 16, 17, 18, 19, 20, 21 }});
+
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,20>{
+            { 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21 }});
+
+        auto myConcat = Concat(5, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        myConcat->getOperator()->associateInput(2, input3);
+        myConcat->getOperator()->associateInput(3, input4);
+        myConcat->getOperator()->associateInput(4, input5);
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        myConcat->forward();
+
+        std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+    }
+    SECTION("Concat 4D inputs on 1st axis") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+            {                                       //
+                {                                   //
+                    {{20, 47},{21, 48},{22, 49}},   //
+                    {{23, 50},{24, 51},{25, 52}},   //
+                    {{26, 53},{27, 54},{28, 55}}    //
+                },                                  //
+            }                                       //
+        });                                         //
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array4D<int,2,3,3,2> {
+            {
+                {                                   //
+                    {{29, 56},{30, 57},{31, 58}},   //
+                    {{32, 59},{33, 60},{34, 61}},   //
+                    {{35, 62},{36, 63},{37, 64}}    //
+                },                                  //
+                {                                   //
+                    {{38, 65},{39, 66},{40, 67}},   //
+                    {{41, 68},{42, 69},{43, 70}},   //
+                    {{44, 71},{45, 72},{46, 73}}    //
+                }                                   //
+            }                                       //
+        });                                         //
+
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
+            {                                       //
+                {                                   //
+                    {{20, 47},{21, 48},{22, 49}},   //
+                    {{23, 50},{24, 51},{25, 52}},   //
+                    {{26, 53},{27, 54},{28, 55}}    //
+                },                                  //
+                {                                   //
+                    {{29, 56},{30, 57},{31, 58}},   //
+                    {{32, 59},{33, 60},{34, 61}},   //
+                    {{35, 62},{36, 63},{37, 64}}    //
+                },                                  //
+                {                                   //
+                    {{38, 65},{39, 66},{40, 67}},   //
+                    {{41, 68},{42, 69},{43, 70}},   //
+                    {{44, 71},{45, 72},{46, 73}}    //
+                }                                   //
+            }                                       //
+        });                                         //
+
+        auto myConcat = Concat(2, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        myConcat->forward();
+
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0)->print();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+    }
+
+    SECTION("Concat 4D inputs on 3rd axis") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+            {                                       //
+                {                                   //
+                    {{20, 47},{21, 48},{22, 49}},   //
+                    {{23, 50},{24, 51},{25, 52}},   //
+                    {{26, 53},{27, 54},{28, 55}}    //
+                },                                  //
+            }                                       //
+        });                                         //
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array4D<int,1,3,6,2> {
+            {
+                {                                   //
+                    {{29, 56},{30, 57},{31, 58},{38, 65},{39, 66},{40, 67}},   //
+                    {{32, 59},{33, 60},{34, 61},{41, 68},{42, 69},{43, 70}},   //
+                    {{35, 62},{36, 63},{37, 64},{44, 71},{45, 72},{46, 73}}    //
+                },
+            }
+        });
+
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,3,9,2> {
+            {                                                                                             //
+                {                                                                                         //
+                    {{20, 47},{21, 48},{22, 49},{29, 56},{30, 57},{31, 58},{38, 65},{39, 66},{40, 67}},   //
+                    {{23, 50},{24, 51},{25, 52},{32, 59},{33, 60},{34, 61},{41, 68},{42, 69},{43, 70}},   //
+                    {{26, 53},{27, 54},{28, 55},{35, 62},{36, 63},{37, 64},{44, 71},{45, 72},{46, 73}}    //
+                },                                                                                        //
+            }                                                                                             //
+        });                                                                                               //
+
+        auto myConcat = Concat(2, 2);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        myConcat->forward();
+
+        std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp
index e659742c0bd200fa33b598f581cfef7b2f1e432e..d11f72474b0b70bf335dfee95d13a9b41cfe6efb 100644
--- a/unit_tests/operator/Test_Div_Op.cpp
+++ b/unit_tests/operator/Test_Div_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]") {
+TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
@@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
 
     /**
      * @todo Special case: scalar not handled yet by
-     * ``OperatorTensor::computeOutputDims()``
+     * ``OperatorTensor::forwardDims()``
      */
     // SECTION("Scalar / Scalar") {
     //     // input_0
@@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
     //     // input_1
     //     T1->resize({});
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
     // }
     // SECTION("Scalar / +1-D") {
@@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
     //         }
     //         T1->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
     //         }
     //         T0->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
 
             T0->resize(dims0);
             T1->resize(dims0);
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dims0);
         }
 
@@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
             T0->resize(dims0);
             T1->resize(dims1);
 
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dimsOut);
 
             // input_0 - wrong
@@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]
             }
             T1->resize(dims1_wrong);
             REQUIRE(dims0 != dims1_wrong);
-            REQUIRE_THROWS(op->computeOutputDims());
+            REQUIRE_THROWS(op->forwardDims());
         }
     }
 }
diff --git a/unit_tests/operator/Test_GatherImpl.cpp b/unit_tests/operator/Test_GatherImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2995963a35cda5b0c5794b1d15e4064438b58ece
--- /dev/null
+++ b/unit_tests/operator/Test_GatherImpl.cpp
@@ -0,0 +1,96 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Gather.hpp"
+
+#include <memory>
+
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Gather(forward)") {
+    SECTION("2D Tensor axis 0") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
+            {
+                {1, 2, 3},
+                {4, 5, 6},
+                {7, 8, 9}
+            }
+        });
+        std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> {
+            {
+                {1, 2}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,2,3> {
+            {
+                {
+                    {4, 5, 6},
+                    {7, 8, 9}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myGather = Gather({1, 2}, {1, 2}, 0);
+        auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
+        op->associateInput(0,input);
+        // op->associateInput(1,indexes);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myGather->forward();
+        op->getOutput(0)->print();
+        expectedOutput->print();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+    SECTION("2D Tensor axis 1") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
+            {
+                {1, 2, 3},
+                {4, 5, 6},
+                {7, 8, 9}
+            }
+        });
+        std::shared_ptr<Tensor> indexes = std::make_shared<Tensor>(Array2D<int,1,2> {
+            {
+                {0, 2}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,3,1,2> {
+            {
+                {
+                    {1, 3}
+                },
+                {
+                    {4, 6}
+                },
+                {
+                    {7, 9}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myGather = Gather({0, 2}, {1, 2}, 1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
+        op->associateInput(0,input);
+        // op->associateInput(1,indexes);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myGather->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
index fcd8489144be121633f2b0a9601dee171e2bdb5e..d20f689aba55d8cbaef553388d4666fd6c1d7172 100644
--- a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
+++ b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
@@ -21,8 +21,8 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
-          "[GlobalAveragePooling][computeOutputDims]") {
+TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)",
+          "[GlobalAveragePooling][forwardDims]") {
   constexpr std::uint16_t NB_TRIALS = 10;
   // Create a random number generator
   std::random_device rd;
@@ -39,7 +39,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
   // input_0
   std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
   SECTION("Un-connected input leads to failure.") {
-    REQUIRE_THROWS(op->computeOutputDims());
+    REQUIRE_THROWS(op->forwardDims());
   }
   op->associateInput(0, input_T);
 
@@ -49,7 +49,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
         const std::size_t nb_dims = 0;
         std::vector<std::size_t> dims(nb_dims);
         input_T->resize(dims);
-        REQUIRE_NOTHROW(op->computeOutputDims());
+        REQUIRE_NOTHROW(op->forwardDims());
       }
     }
     SECTION("Full tensor") {
@@ -61,7 +61,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
             dims[i] = dimsDist(gen);
           }
           input_T->resize(dims);
-          REQUIRE_THROWS(op->computeOutputDims());
+          REQUIRE_THROWS(op->forwardDims());
         }
       }
       SECTION("nbDim > 3") {
@@ -74,7 +74,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(computeOutputDims)",
           std::vector<DimSize_t> dims_out{dims[0], dims[1]};
           input_T->resize(dims);
           op->setInput(0, input_T);
-          REQUIRE_NOTHROW(op->computeOutputDims());
+          REQUIRE_NOTHROW(op->forwardDims());
           REQUIRE(op->getOutput(0)->dims() == dims_out);
           REQUIRE((op->getOutput(0)->dims().size()) == static_cast<size_t>(2));
         }
diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp
index 6c810e675ad46cc5580bd24e57f7e7dbb84db38f..bdd1de87c27351e943c59fa616c40dc4a0001abc 100644
--- a/unit_tests/operator/Test_MatMul_Op.cpp
+++ b/unit_tests/operator/Test_MatMul_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutputDims]") {
+TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     // Create a random number generator
     std::random_device rd;
     std::mt19937 gen(rd());
@@ -43,13 +43,13 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
     //     T1->resize({});
     //     op -> associateInput(1,T1);
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims()).empty());
 
     //     // input_1 - wrong
     //     T1->resize({dist(gen)});
 
-    //     REQUIRE_THROWS(op->computeOutputDims());
+    //     REQUIRE_THROWS(op->forwardDims());
     // }
 
     SECTION("1-D / N-D") {
@@ -66,26 +66,26 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
             // input_1 - right
             T1->resize({dim0});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE((op->getOutput(0)->dims()).empty());
 
             // input_1 - wrong
             T1->resize({dim0+1});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
         SECTION("1-D / 2-D") {
             // input_1 - right
             const std::size_t dim1 = dist(gen);
             T1->resize({dim0,dim1});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1}));
 
             // input_1 - wrong
             T1->resize({dim0+1,dim1});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
         SECTION("1-D / +2-D") {
             // input_1 - right
@@ -94,7 +94,7 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
             const std::size_t dim3 = dist(gen);
             T1->resize({dim1,dim2,dim0,dim3});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1,dim2,dim3}));
         }
     }
@@ -114,26 +114,26 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
             // input_1 - right
             T1->resize({dim1});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0}));
 
             // input_1 - wrong
             T1->resize({dim1+1});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
         SECTION("2-D / 2-D") {
             // input_1 - right
             const std::size_t dim2 = dist(gen);
             T1->resize({dim1, dim2});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim2}));
 
             // input_1 - wrong
             T1->resize({dim1+1,dim2});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
         SECTION("2-D / +2-D") {
             // input_1 - right
@@ -142,13 +142,13 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
             const std::size_t dim4 = dist(gen);
             T1->resize({dim3,dim4,dim1, dim2});
 
-            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE_NOTHROW(op -> forwardDims());
             REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim3,dim4,dim0,dim2}));
 
             // input_1 - wrong
             T1->resize({dim3,dim4,dim1+1,dim2});
 
-            REQUIRE_THROWS(op -> computeOutputDims());
+            REQUIRE_THROWS(op -> forwardDims());
         }
     }
     SECTION("+2-D / +2-D") {
@@ -169,28 +169,28 @@ TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutpu
         // 1
         const std::size_t dim5 = dist(gen);
         T1->resize({dim0,dim1,dim3,dim5});
-        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE_NOTHROW(op -> forwardDims());
         REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
 
         // 2 - input_1 broadcast
         T1->resize({1,dim1,dim3,dim5});
-        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE_NOTHROW(op -> forwardDims());
         REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
 
         // 3 - input_0 broadcast
         const std::size_t dim1_bigger = dist(gen) + 1;
         T1->resize({dim0,dim1_bigger,dim3,dim5});
-        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE_NOTHROW(op -> forwardDims());
         REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
 
         // 4 - input_0+input_1 broadcast
         T1->resize({1,dim1_bigger,dim3,dim5});
-        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE_NOTHROW(op -> forwardDims());
         REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
 
         // input_1 - wrong
         T1->resize({dim0+1,dim1,dim3,dim5});
-        REQUIRE_THROWS(op -> computeOutputDims());
+        REQUIRE_THROWS(op -> forwardDims());
     }
 }
 } // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index cd42791e0db1d95469bdd414cab94f1c6e8fea17..ed4afafe39a367ecabb25ff949eb3d03999d1ea9 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -9,6 +9,12 @@
  *
  ********************************************************************************/
 
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <string>
+#include <utility>  // std::pair
+#include <vector>
+
 #include <catch2/catch_test_macros.hpp>
 
 #include "aidge/operator/Pop.hpp"
@@ -17,7 +23,6 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Testing.hpp"
 #include "aidge/recipes/Recipes.hpp"
-#include <cstddef>
 
 using namespace Aidge;
 
@@ -37,13 +42,12 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(op->nbData() == 1);
         REQUIRE(op->nbOutputs() == 1);
 
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
-        myInput->resize({2,3,5,5});
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(std::vector<std::size_t>({2,1,5,5}));
         std::shared_ptr<OperatorTensor> opTensor = std::static_pointer_cast<OperatorTensor>(op->getOperator());
         opTensor->associateInput(0,myInput);
-        opTensor->computeOutputDims();
+        opTensor->forwardDims();
 
-        REQUIRE(opTensor->outputDimsForwarded());
+        REQUIRE(opTensor->dimsForwarded());
         REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawOutput(0))->dims() == std::vector<size_t>({2,3,5,5}));
         REQUIRE(std::static_pointer_cast<Tensor>(opTensor->getRawInput(0)) == myInput);
         REQUIRE(microGraph->getOrderedInputs()[0].first->getOperator()->getRawInput(0) == myInput);
@@ -74,9 +78,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         op->associateInput(17, myInit);
         op->associateInput(18, myInit);
 
-        op->computeOutputDims();
+        op->forwardDims();
         microGraph->save("lstm_dims", true, true);
-        REQUIRE(op->outputDimsForwarded());
+        REQUIRE(op->dimsForwarded());
 
         //op->updateConsummerProducer();  // require implementation
         //auto microGraphScheduler = std::dynamic_pointer_cast<MetaOperator_Op>(op)->getMicroGraphScheduler();
diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp
index d3e0c5e086fac9d31db817d628214e95d4e41a32..f3f8fb9522943d0a9574cb80cfc228135a973890 100644
--- a/unit_tests/operator/Test_Mul_Op.cpp
+++ b/unit_tests/operator/Test_Mul_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") {
+TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
@@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
 
     /**
      * @todo Special case: scalar not handled yet by
-     * ``OperatorTensor::computeOutputDims()``
+     * ``OperatorTensor::forwardDims()``
      */
     // SECTION("Scalar / Scalar") {
     //     // input_0
@@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
     //     // input_1
     //     T1->resize({});
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
     // }
     // SECTION("Scalar / +1-D") {
@@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
     //         }
     //         T1->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
     //         }
     //         T0->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
 
             T0->resize(dims0);
             T1->resize(dims0);
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dims0);
         }
 
@@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
             T0->resize(dims0);
             T1->resize(dims1);
 
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dimsOut);
 
             // input_0 - wrong
@@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]
             }
             T1->resize(dims1_wrong);
             REQUIRE(dims0 != dims1_wrong);
-            REQUIRE_THROWS(op->computeOutputDims());
+            REQUIRE_THROWS(op->forwardDims());
         }
     }
 }
diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp
index c77615c11e99c174707df21560044fdd3b6a3c42..4a8d242a355cda58c7b36914efdb1304220f713a 100644
--- a/unit_tests/operator/Test_Pow_Op.cpp
+++ b/unit_tests/operator/Test_Pow_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") {
+TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
@@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
 
     /**
      * @todo Special case: scalar not handled yet by
-     * ``OperatorTensor::computeOutputDims()``
+     * ``OperatorTensor::forwardDims()``
      */
     // SECTION("Scalar / Scalar") {
     //     // input_0
@@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
     //     // input_1
     //     T1->resize({});
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
     // }
     // SECTION("Scalar / +1-D") {
@@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
     //         }
     //         T1->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
     //         }
     //         T0->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
 
             T0->resize(dims0);
             T1->resize(dims0);
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dims0);
         }
 
@@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
             T0->resize(dims0);
             T1->resize(dims1);
 
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dimsOut);
 
             // input_0 - wrong
@@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]
             }
             T1->resize(dims1_wrong);
             REQUIRE(dims0 != dims1_wrong);
-            REQUIRE_THROWS(op->computeOutputDims());
+            REQUIRE_THROWS(op->forwardDims());
         }
     }
 }
diff --git a/unit_tests/operator/Test_ReshapeImpl.cpp b/unit_tests/operator/Test_ReshapeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5d28005eb40534742aae495948e5269373b81ad1
--- /dev/null
+++ b/unit_tests/operator/Test_ReshapeImpl.cpp
@@ -0,0 +1,67 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Reshape.hpp"
+
+#include <memory>
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Reshape(forward)", "[Reshape][CPU]") {
+    SECTION("1D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array1D<float,6> {
+            {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,3> {
+            {
+                {1.0, 2.0, 3.0},
+                {4.0, 5.0, 6.0}
+            }
+        });
+
+        std::shared_ptr<Node> myReshape = Reshape({2, 3});
+        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        myReshape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+    }
+    SECTION("2D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,3> {
+            {
+                {1.0, 2.0, 3.0},
+                {4.0, 5.0, 6.0}
+            }
+
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,3,2> {
+            {
+                {1.0, 2.0},
+                {3.0, 4.0},
+                {5.0, 6.0}
+            }
+        });
+
+        std::shared_ptr<Node> myReshape = Reshape({3, 2});
+        auto op = std::static_pointer_cast<OperatorTensor>(myReshape -> getOperator());
+        op->associateInput(0, input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        myReshape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..91ae92848b552a6038a4cb5f8dd3848b20ac2168
--- /dev/null
+++ b/unit_tests/operator/Test_SliceImpl.cpp
@@ -0,0 +1,160 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Slice.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
+    SECTION("1D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
+            {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,4> {
+            {0, 1, 2,-3}
+        });
+
+        std::shared_ptr<Node> mySlice = Slice({0}, {3}, {0});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("2D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
+            {
+                { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,3> {
+            {
+                {-5,-6, 7},
+                {-5,-6, 7}
+            }
+        });
+
+        std::shared_ptr<Node> mySlice = Slice({0,5}, {1,7}, {0,1});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+        // mySlice->getOperator()->output(0).print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("3D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
+            {
+                {
+                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                },
+                {
+                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,1,3> {
+            {
+                {
+                    { 4,-5,-6}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> mySlice = Slice({0,1,4}, {0,1,6}, {0,1,2});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+        // mySlice->getOperator()->output(0).print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+
+        std::shared_ptr<Node> mySlice = Slice({0,0,0,0}, {1,1,1,9}, {0,1,2,3});
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+        // mySlice->getOperator()->output(0).print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+}
diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp
index b7b744410d31ea32dea5a15cc7a29da093488d14..329f3da798854ddff3d1c1393d60c57ef180c70a 100644
--- a/unit_tests/operator/Test_Sub_Op.cpp
+++ b/unit_tests/operator/Test_Sub_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") {
+TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
@@ -42,7 +42,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
 
     /**
      * @todo Special case: scalar not handled yet by
-     * ``OperatorTensor::computeOutputDims()``
+     * ``OperatorTensor::forwardDims()``
      */
     // SECTION("Scalar / Scalar") {
     //     // input_0
@@ -51,7 +51,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
     //     // input_1
     //     T1->resize({});
 
-    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE_NOTHROW(op->forwardDims());
     //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
     // }
     // SECTION("Scalar / +1-D") {
@@ -69,7 +69,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
     //         }
     //         T1->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -88,7 +88,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
     //         }
     //         T0->resize(dims);
 
-    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE_NOTHROW(op->forwardDims());
     //         REQUIRE((op->getOutput(0)->dims()) == dims);
     //     }
     // }
@@ -103,7 +103,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
 
             T0->resize(dims0);
             T1->resize(dims0);
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dims0);
         }
 
@@ -126,7 +126,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
             T0->resize(dims0);
             T1->resize(dims1);
 
-            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE_NOTHROW(op->forwardDims());
             REQUIRE((op->getOutput(0)->dims()) == dimsOut);
 
             // input_0 - wrong
@@ -137,7 +137,7 @@ TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]
             }
             T1->resize(dims1_wrong);
             REQUIRE(dims0 != dims1_wrong);
-            REQUIRE_THROWS(op->computeOutputDims());
+            REQUIRE_THROWS(op->forwardDims());
         }
     }
 }
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8b6eafc70b7eefec6e1ccab9d0cfcde1eb4a09d5
--- /dev/null
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -0,0 +1,123 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Transpose(forward)", "[Transpose][CPU]") {
+    SECTION("3D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
+            {
+                {{0.42507452, 0.11244237, 0.43243718, 0.62354952},
+                {0.90250170, 0.48719984, 0.45781207, 0.92536664},
+                {0.06348717, 0.91678733, 0.64452291, 0.00484818}},
+
+                {{0.66873497, 0.99508536, 0.55714869, 0.84887981},
+                {0.41666120, 0.92365038, 0.80034822, 0.38721532},
+                {0.52037925, 0.53937608, 0.66380072, 0.36330253}}
+            }
+        });
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array3D<float,2,4,3> {
+            {
+                {{0.42507452, 0.90250170, 0.06348717},
+                {0.11244237, 0.48719984, 0.91678733},
+                {0.43243718, 0.45781207, 0.64452291},
+                {0.62354952, 0.92536664, 0.00484818}},
+
+                {{0.66873497, 0.41666120, 0.52037925},
+                {0.99508536, 0.92365038, 0.53937608},
+                {0.55714869, 0.80034822, 0.66380072},
+                {0.84887981, 0.38721532, 0.36330253}}
+            }
+        });
+        std::shared_ptr<Node> myTranspose = Transpose({0,2,1});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        myTranspose->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *output);
+    }
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> {
+            {
+                {
+                    {
+                        {1, 2, 3, 4}
+                    },
+                    {
+                        {5, 6, 7, 8}
+                    },
+                    {
+                        {9, 10, 11, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 14, 15, 16}
+                    },
+                    {
+                        {17, 18, 19, 20}
+                    },
+                    {
+                        {21, 22, 23, 24}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,2,4,1,3> {
+            {
+                {
+                    {
+                        {1, 5, 9}
+                    },
+                    {
+                        {2, 6, 10}
+                    },
+                    {
+                        {3, 7, 11}
+                    },
+                    {
+                        {4, 8, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 17, 21}
+                    },
+                    {
+                        {14, 18, 22}
+                    },
+                    {
+                        {15, 19, 23}
+                    },
+                    {
+                        {16, 20, 24}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Node> myTranspose = Transpose({0,3,2,1});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myTranspose->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *output);
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index e2c1a8fcb96256fa8c3f26a3495913bd987de2d4..ceaa5e301c820ef54970a0e76004ad3467ae66da 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -54,7 +54,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") {
       if (unicity1) {
         for (auto &node : g1->getNodes()) {
           std::static_pointer_cast<GenericOperator_Op>(node->getOperator())
-              ->setComputeOutputDims(
+              ->setForwardDims(
                   GenericOperator_Op::InputIdentity(0, node->nbOutputs()));
         }
 
@@ -97,7 +97,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") {
     //   if (unicity1) {
     //     for (auto &node : g1->getNodes()) {
     //       std::static_pointer_cast<GenericOperator_Op>(node->getOperator())
-    //           ->setComputeOutputDims(
+    //           ->setForwardDims(
     //               GenericOperator_Op::InputIdentity(0, node->nbOutputs()));
     //     }