diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index e4d2cb4faca3dda64cff6aea541c30787c23d0ad..e7b16963f4c26e5d014ce90fa289c043e2eb0be4 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -27,8 +27,8 @@ class test_attributes(unittest.TestCase):
         out_channels = 8
         k_dims = [2, 2]
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
-        self.assertEqual(conv_op.get_attr("InChannels"), in_channels)
-        self.assertEqual(conv_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(conv_op.in_channels(), in_channels)
+        self.assertEqual(conv_op.out_channels(), out_channels)
         self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
 
     def test_fc(self):
@@ -36,7 +36,7 @@ class test_attributes(unittest.TestCase):
         out_channels = 8
         nb_bias = True
         fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
-        self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(fc_op.out_channels(), out_channels)
         self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_producer_1D(self):
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 931b1b26a04e8886c211d77f8b0147c2140d350a..940440bad52e367fe04872a308c99e4c802fa242 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -59,9 +59,11 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Shape.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
 #include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/Split.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Transpose.hpp"
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index f3fa4ef5164a2eed7caaa7baa7f83e7ed00403b8..e11a6d26fd8d2977cbee39719ce32c8bf98cb057 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -182,6 +182,17 @@ public:
     */
     inline std::size_t size() const noexcept { return mNbElts; }
 
+    /**
+     * @brief Return the current capacity of the tensor, i.e. the amount of memory
+     * actually allocated. It can differ from the size:
+     * - Capacity can be 0 if the tensor memory has not been initialized yet (because
+     *   of lazy initialization, memory is allocated only when it is accessed for the
+     *   first time);
+     * - Capacity can be > size if the tensor was downsized but its memory was not
+     *   reallocated.
+    */
+    virtual std::size_t capacity() const noexcept = 0;
+
     /**
      * @brief Return the size (in bytes) of one element (scalar).
     */
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 6434d23f1266bba5d4e7ae890dca2c2577e403b5..526a2dd2eec5e7ed1d2736a5c0ab9c9065622ad7 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -43,6 +43,8 @@ public:
         return std::make_shared<TensorImpl_cpu<T>>(device, dims);
     }
 
+    inline std::size_t capacity() const noexcept override final { return mData.size(); }
+
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
 
     void zeros() override final;
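As a quick illustration of the size()/capacity() contract described above, here is a rough sketch at the TensorImpl_cpu level. The (device, dims) constructor is inferred from the create() helper in this header, and the assumption that zeros() is what triggers the lazy allocation is mine, not the diff's:

    #include <cstdio>
    #include <vector>

    #include "aidge/backend/cpu/data/TensorImpl.hpp"

    int main() {
        // Arbitrary dims; device 0 is assumed to be the default CPU device.
        Aidge::TensorImpl_cpu<float> impl(0, std::vector<Aidge::DimSize_t>{2, 3});
        // Lazy initialization: nothing allocated yet, so capacity() should be 0 while size() is 6.
        std::printf("size=%zu capacity=%zu\n", impl.size(), impl.capacity());
        impl.zeros();  // first access: forces the buffer to be allocated (assumption)
        // After allocation, capacity() is expected to be >= size().
        std::printf("size=%zu capacity=%zu\n", impl.size(), impl.capacity());
        return 0;
    }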
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index c9a4c11d780a41a1620518047d66a7de2d7b55fa..627e78790020c04d50f839f01de2130ba8d8d774 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -208,7 +208,12 @@ public:
 
     /**
      * @brief Compute dimensions of input/output Tensors for each Operator of the
-     * GraphView object's Nodes.
+     * GraphView object's Nodes, by calling Node::forwardDims().
+     * This function ensures the following:
+     * - forwardDims() is called on every node, regardless of whether dims were previously forwarded;
+     * - forwardDims() calls are made in node dependency order, because if dims have changed
+     *   at any point in the graph, the change must be propagated correctly to all succeeding nodes;
+     * - It handles cyclic dependencies correctly (currently only induced by the Memorize_Op).
      */
     bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
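For reference, a minimal usage sketch of the behaviour documented above, using the Conv and ReLU factory helpers touched elsewhere in this diff (the input shapes are arbitrary):

    #include "aidge/graph/OpArgs.hpp"   // Sequential
    #include "aidge/operator/Conv.hpp"
    #include "aidge/operator/ReLU.hpp"

    void propagateDims() {
        auto graph = Aidge::Sequential({Aidge::Conv(3, 8, {3, 3}, "conv"), Aidge::ReLU("relu")});
        graph->forwardDims({{1, 3, 32, 32}});  // first propagation, done in dependency order
        graph->forwardDims({{4, 3, 64, 64}});  // dims changed: every node is forwarded again
    }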
 
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index af2993d67f16df498f13a0489a3837a8f9fc4a75..9a9fced142ebc345c095c1eeca6b9a6c4270cf36 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -102,13 +102,14 @@ inline std::shared_ptr<Node> AvgPooling(
     return AvgPooling(to_array(kernel_dims), name, stride_dims);
 }
 
+
+}  // namespace Aidge
+
 extern template class Aidge::AvgPooling_Op<1>;
 extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
 extern template class Aidge::AvgPooling_Op<4>;
 
-}  // namespace Aidge
-
 namespace {
 template <>
 const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index d6a0df5ab472c4a728e5b5042258d6d2bd34f871..c30282f3438889e233f3d9ed22ab7c7e795b2951 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -30,7 +30,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias };
+enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
@@ -38,8 +38,6 @@ class Conv_Op : public OperatorTensor,
                 public StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        DimSize_t,
-                                        DimSize_t,
                                         std::array<DimSize_t, DIM>,
                                         bool> {
 
@@ -51,24 +49,20 @@ public:
     using Attributes_ = StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        DimSize_t,
-                                        DimSize_t,
                                         std::array<DimSize_t, DIM>,
                                         bool>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr Conv_Op(DimSize_t inChannels,
-                      DimSize_t outChannels,
-                      const std::array<DimSize_t, DIM> &kernelDims,
+    constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
                       bool noBias = false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvAttr::StrideDims>(strideDims),
                       attr<ConvAttr::DilationDims>(dilationDims),
-                      attr<ConvAttr::InChannels>(inChannels),
-                      attr<ConvAttr::OutChannels>(outChannels),
+                    //   attr<ConvAttr::InChannels>(inChannels),
+                    //   attr<ConvAttr::OutChannels>(outChannels),
                       attr<ConvAttr::KernelDims>(kernelDims),
                       attr<ConvAttr::NoBias>(noBias)) {}
 
@@ -76,16 +70,7 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Conv_Op(const Conv_Op<DIM>& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Conv_Op(const Conv_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -108,115 +93,28 @@ public:
 
     // }
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        // check inputs have been associated
-        bool associated = true;
-        for (IOIndex_t i = 0; i < 3; ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
-                     (getInput(0)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()),
-                     "Wrong input size for Conv operator.");
-            AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)) &&
-                        (getInput(1)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()) &&
-                        (getInput(1)->template dims<DIM+2>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
-                        "Wrong weight size for Conv operator.");
-            if(!this->template getAttr<ConvAttr::NoBias>())
-                AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
-                        (getInput(2)->template dims<1>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
-                        "Wrong bias size for Conv operator.");
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-            for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                       (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
-                                               1;
-
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
-            }
-
-            outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-        }
-
-        return associated;
-    }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
     computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
                           const std::vector<DimSize_t>& outputDims,
-                          const IOIndex_t outputIdx = 0) const override {
-        if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-        }
-        if (firstEltDims.size() != outputDims.size()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
-        }
-        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
-            // Offset
-            auto inputIdxDims = firstEltDims; // batch idx is the same
-            inputIdxDims[1] = 0; // each channel is used so start with the first one
-
-            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
-                }
-            }
+                          const IOIndex_t outputIdx = 0) const override;
 
-            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-            // Input
-            // same batch value, every input channel is used
-            std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
-            for (DimIdx_t i = 0; i < DIM; ++i) {
-                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
-                            + 1
-                            + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-                inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
-            }
 
-            // Weight
-            // same output value, every input channel is used
-            std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
-            for (std::size_t i = 0; i < DIM; ++i) {
-                weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
-            }
-            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = firstEltDims[1];
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
-            // Result
-            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-
-            // Bias
-            if (! this->template getAttr<ConvAttr::NoBias>()){
-                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
-            }
-            return res;
+    DimSize_t inChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated, so no specific number of input channels is imposed.");
         }
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+        return getInput(1)->template dims<DIM+2>()[1];
     }
 
-
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-
-        // By default, automatically set backend for weight and bias inputs
-        getInput(1)->setBackend(name, device);
-        getInput(2)->setBackend(name, device);
+    DimSize_t outChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated, so no specific number of output channels is imposed.");
+        }
+        return getInput(1)->template dims<DIM+2>()[0];
     }
 
     static const std::vector<std::string> getInputsName(){
@@ -227,8 +125,6 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string Conv_Op<DIM>::Type = "Conv";
 
 /**
  * @brief Perform a convolution on the input Tensor.
@@ -252,7 +148,7 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
     addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
 
@@ -274,13 +170,13 @@ inline std::shared_ptr<Node> Conv(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Conv_Op<2>;
+
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "StrideDims",
     "DilationDims",
-    "InChannels",
-    "OutChannels",
     "KernelDims",
     "NoBias"
 };
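The channel counts are no longer attributes of Conv_Op: they are read back from the weight tensor (input #1). A small sketch of the resulting C++ API, mirroring the updated Python test at the top of this diff (dims are arbitrary):

    #include <cassert>
    #include <memory>

    #include "aidge/operator/Conv.hpp"

    void convChannels() {
        auto conv = Aidge::Conv(3, 8, {5, 5}, "conv");  // the factory still attaches weight/bias producers
        auto op = std::static_pointer_cast<Aidge::Conv_Op<2>>(conv->getOperator());
        // Weight dims are {outChannels, inChannels, kH, kW} = {8, 3, 5, 5}.
        assert(op->inChannels() == 3);   // dims<4>()[1] of the weight tensor
        assert(op->outChannels() == 8);  // dims<4>()[0] of the weight tensor
    }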
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 2337ff66f00b932a190d5b1735d53df3da8ffdbf..7091421720aaf4291198823a6d7dcd732a8d9f99 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -29,7 +29,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
@@ -37,7 +37,6 @@ class ConvDepthWise_Op : public OperatorTensor,
                 public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
-                                       DimSize_t,
                                        std::array<DimSize_t, DIM>,
                                        bool> {
 public:
@@ -48,21 +47,18 @@ public:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
-                                             DimSize_t,
                                              std::array<DimSize_t, DIM>,
                                              bool>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
-                               const std::array<DimSize_t, DIM> &kernel_dims,
+    constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
                                bool no_bias=false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::Channels>(nbChannels),
                       attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
                       attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
 
@@ -70,16 +66,7 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -90,105 +77,20 @@ public:
     }
 
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        // check inputs have been associated
-        // TODO : add a check of inputs dimensions ?
-        bool associated = true;
-        for (IOIndex_t i = 0; i < 3; ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-            for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
-                                                       (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
-                                               1;
-
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
-            }
-            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
-            // if (mInputs[1]->empty()) {
-            //     mInputs[1]->resize(weightDims);
-            // }
-            // if (mInputs[2]->empty()) {
-            //     mInputs[2]->resize({mInputs[0]->dims()[1]});
-            // }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-        }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-        return associated;
-    }
-
-    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-        if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-        }
-        if (firstEltDims.size() != outputDims.size()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
-        }
-        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
-            // Offset
-            auto inputIdxDims = firstEltDims; // batch idx is the same
-
-            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
-                }
-            }
-
-            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-            // Input
-            // same batch value
-            std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
-            for (DimIdx_t i = 0; i < DIM; ++i) {
-                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
-                            + 1
-                            + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-                inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
-            }
-
-            // Weight
-            std::vector<DimSize_t> weightDims{outputDims[1], 1};
-            for (std::size_t i = 0; i < DIM; ++i) {
-                weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
-            }
-            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = firstEltDims[1];
-
-
-            // Result
-            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-            // Bias
-            if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
-                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
-            }
-            return res;
-        }
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    }
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
+                          const std::vector<DimSize_t>& outputDims,
+                          const IOIndex_t outputIdx = 0) const override;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
-        // By default, automatically set backend for weight and bias inputs
-        getInput(1)->setBackend(name, device);
-        getInput(2)->setBackend(name, device);
+    DimSize_t nbChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated, so no specific number of channels is imposed.");
+        }
+        return getInput(1)->template dims<DIM+2>()[0];
     }
 
     static const std::vector<std::string> getInputsName(){
@@ -199,9 +101,6 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
-
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
@@ -211,7 +110,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
     addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
     return convDW;
@@ -231,9 +130,11 @@ inline std::shared_ptr<Node> ConvDepthWise(
 }
 }  // namespace Aidge
 
+extern template class Aidge::ConvDepthWise_Op<2>;
+
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
                                                           "KernelDims", "NoBias"};
 }
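Same pattern for the depthwise variant: the channel count is now read from the weight tensor through nbChannels(). A minimal sketch, assuming the C-style array factory helper behaves like the one for Conv:

    #include <cassert>
    #include <memory>

    #include "aidge/operator/ConvDepthWise.hpp"

    void depthwiseChannels() {
        auto dw = Aidge::ConvDepthWise(8, {3, 3}, "dw");
        auto op = std::static_pointer_cast<Aidge::ConvDepthWise_Op<2>>(dw->getOperator());
        assert(op->nbChannels() == 8);  // weight dims are {nbChannels, 1, kH, kW}
    }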
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index b97874f4e0deafd685453b3ce9865e65fafe7561..9f10970c4fd5b21a1cb92b334167d353f066e05b 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -24,26 +24,24 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class FCAttr { OutChannels, NoBias };
+enum class FCAttr { NoBias };
 
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
                                  std::shared_ptr<OperatorImpl>(const FC_Op &)>,
-              public StaticAttributes<FCAttr, DimSize_t, bool> {
+              public StaticAttributes<FCAttr, bool> {
 public:
     static const std::string Type;
 
     FC_Op() = delete;
 
-    using Attributes_ = StaticAttributes<FCAttr, DimSize_t, bool>;
+    using Attributes_ = StaticAttributes<FCAttr, bool>;
     template <FCAttr e> using attr = typename Attributes_::template attr<e>;
 
-    FC_Op(DimSize_t out_channels, bool noBias)
+    FC_Op(bool noBias)
     : OperatorTensor(Type, 1, 2, 1),
-      Attributes_(
-        attr<FCAttr::OutChannels>(out_channels),
-        attr<FCAttr::NoBias>(noBias))
+      Attributes_(attr<FCAttr::NoBias>(noBias))
     {}
 
     /**
@@ -75,6 +73,13 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    DimSize_t outChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Fully Connected (FC) operator has no weight Tensor associated, so no specific number of output channels is imposed.");
+        }
+        return getInput(1)->template dims<2>()[0];
+    }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "weight", "bias"};
     }
@@ -83,9 +88,9 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
+inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name);
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
     addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
     return fc;
@@ -94,8 +99,7 @@ inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, boo
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
-                                                        "NoBias"};
+const char *const EnumStrings<Aidge::FCAttr>::data[] = {"NoBias"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
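Likewise for FC: OutChannels is no longer an attribute, and outChannels() reads the weight tensor instead (this mirrors the updated Python test above; dims are arbitrary):

    #include <cassert>
    #include <memory>

    #include "aidge/operator/FC.hpp"

    void fcChannels() {
        auto fc = Aidge::FC(16, 8);  // weight producer gets dims {outChannels, inChannels} = {8, 16}
        auto op = std::static_pointer_cast<Aidge::FC_Op>(fc->getOperator());
        assert(op->outChannels() == 8);  // dims<2>()[0] of the weight tensor
    }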
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 367aa4e2d68fb1095b1e3b3be76f6ab59439e47f..bcbe1c6c69e0a666d7a976558d558f101c5b8fca 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -27,6 +27,8 @@
 
 namespace Aidge {
 
+
+
 /**
  * @brief Identity_Op is a helper operator made to ease the declaration of MetaNodes.
  * This Operator has no Implementation, it just forwards its input Tensor.
@@ -63,7 +65,7 @@ public:
         return std::make_shared<Identity_Op>(*this);
     }
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
+    // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
 
     /**
      * @brief Check if output dimensions have been computed.
@@ -74,7 +76,7 @@ public:
      * @return false Input has no dimensions or is a nullptr.
      */
     bool dimsForwarded() const override final {
-        return mInputs[0] ? !mInputs[0]->empty() : false;
+        return mInputs[0] ? (mInputs[0]->empty() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
     }
 
 
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index a411101618a5f4acaf070516d67691a6b55e3ff5..fb8c73af33dd081664c82427ea8aa6876117d695 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -72,7 +72,6 @@ public:
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
-    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
 
     bool forwardDims(bool allowDataDependency = false) override final {
         // Check first that all required inputs are available, otherwise
@@ -118,7 +117,7 @@ public:
     void updateConsummerProducer() override;
     void forward() override;
     void backward() override {
-        assert(false && "not implemented");
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for a MetaOperator");
     }
 
     inline bool isAtomic() const noexcept override final { return false; }
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index fb3aa6384fc703d758cb8753dcf54c4694f96bd4..eb57761cc5927cb4eedfb6cb12b1d49a0ee50b9c 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -12,22 +12,26 @@
 #ifndef AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
 #define AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
 
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp" // Sequential
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/Pad.hpp"
-#include "aidge/operator/Memorize.hpp"
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/Mul.hpp"
-#include "aidge/operator/FC.hpp"
-#include "aidge/operator/Identity.hpp"
-#include "aidge/operator/Concat.hpp"
-#include "aidge/operator/Tanh.hpp"
 #include "aidge/operator/Sigmoid.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
 
 namespace Aidge {
+
+
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                   DimSize_t out_channels,
@@ -40,7 +44,7 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
@@ -48,6 +52,20 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
     return metaOp;
 }
 
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                                  bool no_bias = false)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
+}
+
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedConv(
@@ -63,6 +81,8 @@ inline std::shared_ptr<Node> PaddedConv(
     return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
@@ -74,7 +94,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
@@ -82,6 +102,20 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
     return metaOp;
 }
 
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
+                                  const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
+                                  bool no_bias = false)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
+}
+
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedConvDepthWise(
@@ -96,30 +130,29 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
     return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
+
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+extern std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
-{
-    auto graph = Sequential({
-        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
-        AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
-    });
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
 
-    return MetaOperator("PaddedAvgPooling", graph, name);
-}
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+extern std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> PaddedAvgPooling(
-    DimSize_t const (&kernel_dims)[DIM],
+extern std::shared_ptr<Node> PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0))
-{
-    return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
-}
+    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
+
+////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
@@ -136,6 +169,20 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
     return MetaOperator("PaddedMaxPooling", graph, name);
 }
 
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
+                                  bool ceil_mode = false)
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, ""),
+        MaxPooling(kernel_dims, "", stride_dims, ceil_mode)
+    });
+
+    return std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph);
+}
+
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> PaddedMaxPooling(
@@ -148,115 +195,17 @@ inline std::shared_ptr<Node> PaddedMaxPooling(
     return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
 }
 
-inline std::shared_ptr<Node> LSTM(DimSize_t in_channels,
-                                  DimSize_t hidden_channels,
-                                  DimSize_t seq_length,
-                                  bool noBias = false,
-                                  const std::string& name = "")
-{
-    // Construct micro-graph
-    auto input = Identity((!name.empty()) ? name + "_input" : "");
-    auto hiddenState = Memorize(seq_length, (!name.empty()) ? name + "_hidden_state" : "");
-    auto cellState = Memorize(seq_length, (!name.empty()) ? name + "_cell_state" : "");
-    auto add = Add(2, (!name.empty()) ? name + "_add" : "");
-
-    // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_forgetGateX" : "");
-    input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_forgetGateH" : "");
-    hiddenState->addChild(forgetGateH, 1, 0);
-    auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
-    forgetGateX->addChild(forgetGate, 0, 0);
-    forgetGateH->addChild(forgetGate, 0, 1);
-    auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : "");
-    auto forgetGateMul = Mul((!name.empty()) ? name + "_forgetGateMul" : "");
-    forgetGate->addChild(forgetGateAct, 0, 0);
-    forgetGateAct->addChild(forgetGateMul, 0, 0);
-    forgetGateMul->addChild(add, 0, 0);
-    cellState->addChild(forgetGateMul, 1, 1);
-
-    // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_inputGateX" : "");
-    input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_inputGateH" : "");
-    hiddenState->addChild(inputGateH, 1, 0);
-    auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
-    inputGateX->addChild(inputGate, 0, 0);
-    inputGateH->addChild(inputGate, 0, 1);
-    auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : "");
-    auto inputGateMul = Mul((!name.empty()) ? name + "_inputGateMul" : "");
-    inputGate->addChild(inputGateAct, 0, 0);
-    inputGateAct->addChild(inputGateMul, 0, 0);
-    inputGateMul->addChild(add, 0, 1);
-
-    // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_cellCandidateX" : "");
-    input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_cellCandidateH" : "");
-    hiddenState->addChild(cellCandidateH, 1, 0);
-    auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
-    cellCandidateX->addChild(cellCandidate, 0, 0);
-    cellCandidateH->addChild(cellCandidate, 0, 1);
-    auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : "");
-    cellCandidate->addChild(cellCandidateAct, 0, 0);
-    cellCandidateAct->addChild(inputGateMul, 0, 1);
-
-    // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_outputGateX" : "");
-    input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_outputGateH" : "");
-    hiddenState->addChild(outputGateH, 1, 0);
-    auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
-    outputGateX->addChild(outputGate, 0, 0);
-    outputGateH->addChild(outputGate, 0, 1);
-    auto outputGateAct = Sigmoid((!name.empty()) ? name + "_outputGateAct" : "");
-    auto outputGateMul = Mul((!name.empty()) ? name + "_outputGateMul" : "");
-    outputGate->addChild(outputGateAct, 0, 0);
-    outputGateAct->addChild(outputGateMul, 0, 0);
-
-    // Updated cell state to help determine new hidden state
-    auto cellUpdatedAct = Tanh((!name.empty()) ? name + "_cellUpdatedAct" : "");
-    add->addChild(cellUpdatedAct, 0, 0);
-    cellUpdatedAct->addChild(outputGateMul, 0, 1);
-    outputGateMul->addChild(hiddenState, 0, 0);
-    add->addChild(cellState, 0, 0);
-
-    std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
-    microGraph->add(input);
-    microGraph->add({hiddenState, cellState, add,
-        forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
-        inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
-        cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
-        outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
-        cellUpdatedAct}, false);
-
-    microGraph->setOrderedInputs({{input, 0},
-        {inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
-        {inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
-        {inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
-        {inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
-        {hiddenState, 1}, {cellState, 1}});
-    microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
-
-    auto metaOp = MetaOperator("LSTM", microGraph, name);
-    addProducer(metaOp, 1, {hidden_channels, in_channels}, "wi");
-    addProducer(metaOp, 2, {hidden_channels, in_channels}, "wo");
-    addProducer(metaOp, 3, {hidden_channels, in_channels}, "wf");
-    addProducer(metaOp, 4, {hidden_channels, in_channels}, "wc");
-    addProducer(metaOp, 5, {hidden_channels, hidden_channels}, "ri");
-    addProducer(metaOp, 6, {hidden_channels, hidden_channels}, "ro");
-    addProducer(metaOp, 7, {hidden_channels, hidden_channels}, "rf");
-    addProducer(metaOp, 8, {hidden_channels, hidden_channels}, "rc");
-    addProducer(metaOp, 9, {(noBias ? 0 : hidden_channels)}, "wbi");
-    addProducer(metaOp, 10, {(noBias ? 0 : hidden_channels)}, "wbo");
-    addProducer(metaOp, 11, {(noBias ? 0 : hidden_channels)}, "wbf");
-    addProducer(metaOp, 12, {(noBias ? 0 : hidden_channels)}, "wbc");
-    addProducer(metaOp, 13, {(noBias ? 0 : hidden_channels)}, "rbi");
-    addProducer(metaOp, 14, {(noBias ? 0 : hidden_channels)}, "rbo");
-    addProducer(metaOp, 15, {(noBias ? 0 : hidden_channels)}, "rbf");
-    addProducer(metaOp, 16, {(noBias ? 0 : hidden_channels)}, "rbc");
-    return metaOp;
-}
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Node> LSTM(DimSize_t in_channels,
+                           DimSize_t hidden_channels,
+                           DimSize_t seq_length,
+                           bool noBias = false,
+                           const std::string& name = "");
+
+std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length,
+                                         bool noBias = false);
+
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
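The new *_Op helpers return the bare MetaOperator_Op rather than a wrapped Node (and, for the convolution variants, without the weight/bias producers that the Node-returning helpers attach), leaving Node creation to the caller. A minimal sketch with the PaddedMaxPooling_Op helper defined above (the node name is arbitrary):

    #include <memory>

    #include "aidge/graph/Node.hpp"
    #include "aidge/operator/MetaOperatorDefs.hpp"

    void buildPaddedPool() {
        auto op = Aidge::PaddedMaxPooling_Op<2>({2, 2}, /*stride_dims=*/{2, 2}, /*padding_dims=*/{1, 1, 1, 1});
        auto node = std::make_shared<Aidge::Node>(op, "pool");  // wrap explicitly when a Node is needed
        (void)node;
    }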
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 3ee2342297208f6f4e4b061409bc5071c811d2ac..09172f9d59d417132da7577fdec148e882e3d613 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -87,7 +87,6 @@ public:
      * @param data Data to copy.
      */
     virtual void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
-    virtual void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0;
         /**
      * @brief Set the specified output value by performing a deep copy of the given data.
@@ -95,7 +94,6 @@ public:
      * @param inputIdx Index of the input to set.
      */
     virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) = 0;
-    virtual void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) = 0;
     virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
 
     std::shared_ptr<Hook> getHook(const std::string& hookName) {
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index a493793278d42904d8a62e31571720f94ff1655d..f2a59dda743af52647ad650aae516ef07ba89ac4 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -57,13 +57,11 @@ public:
     // Tensor access
     // input management
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override;
-    void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override;
     const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
 
     // output management
     void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override;
-    void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override;
     virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
     std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final;
     ///////////////////////////////////////////////////
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 23825079673129ea08aa7da40b21a8cc921d6ba0..c376bab3db22b6710a0915f7fcf2f749a60b7b61 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -107,12 +107,6 @@ public:
     void backward() override final {
         // fmt::print("Basic Producer backward() function.\n");
     }
-    void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
-        if (getAttr<ProdAttr::Constant>()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
-        }
-        OperatorTensor::setOutput(outputIdx, std::move(data));
-    }
 
     void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
         if (getAttr<ProdAttr::Constant>()) {
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3132e4ab7adcc331772d627147cc31c25597570a
--- /dev/null
+++ b/include/aidge/operator/Shape.hpp
@@ -0,0 +1,103 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHAPE_H_
+#define AIDGE_CORE_OPERATOR_SHAPE_H_
+
+#include <cstdint>  // std::int64_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class Shape_OpImpl : public OperatorImpl {
+public:
+    Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class ShapeAttr { Start, End };
+
+class Shape_Op : public OperatorTensor,
+                public Registrable<Shape_Op,
+                                   std::string,
+                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)>,
+                public StaticAttributes<ShapeAttr, std::int64_t, std::int64_t> {
+
+public:
+    static const std::string Type;
+
+    Shape_Op() = delete;
+
+    using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>;
+    template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
+    Shape_Op(std::int64_t start, std::int64_t end)
+            : OperatorTensor(Type, 1, 0, 1),
+            Attributes_(attr<ShapeAttr::Start>(start),
+                        attr<ShapeAttr::End>(end))
+    {
+        mImpl = std::make_shared<Shape_OpImpl>(*this);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Shape_Op(const Shape_Op& op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Shape_Op, *this, op.backend());
+        }
+        else {
+            mImpl = std::make_shared<Shape_OpImpl>(*this);
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Shape_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Shape_Op>(*this);
+    }
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Shape(std::int64_t start = 0, std::int64_t end = -1, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
+}
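+
+// Illustrative usage sketch (the node name is an assumption):
+//   auto shapeNode = Shape(/*start=*/0, /*end=*/-1, "shape_op");
+//   // shapeNode wraps a Shape_Op with one data input, one data output and the
+//   // Start/End attributes set to the values given above.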
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"Start", "End"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index d48dbc2b60e46eb5c074b8adae065383e29b1769..1868dc6e3df48401ef3f8a126b07572e2f45144d 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -30,16 +30,16 @@ class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
                                    std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
-                public StaticAttributes<SoftmaxAttr, int> {
+                public StaticAttributes<SoftmaxAttr, std::size_t> {
 
 public:
     static const std::string Type;
 
     Softmax_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SoftmaxAttr, int>;
+    using Attributes_ = StaticAttributes<SoftmaxAttr, std::size_t>;
     template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
-    Softmax_Op(int axis)
+    Softmax_Op(std::size_t axis)
             :  OperatorTensor(Type, 1, 0, 1),
             Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {}
 
@@ -76,7 +76,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Softmax(int axis, const std::string& name = "") {
+inline std::shared_ptr<Node> Softmax(std::size_t axis, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ff50a6aa7b8de971431515a09ca4e684dcc51865
--- /dev/null
+++ b/include/aidge/operator/Split.hpp
@@ -0,0 +1,111 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SPLIT_H_
+#define AIDGE_CORE_OPERATOR_SPLIT_H_
+
+#include <cstdint>  // std::int8_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class Split_OpImpl : public OperatorImpl {
+public:
+    Split_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class SplitAttr { Axis, Split };
+
+class Split_Op
+    : public OperatorTensor,
+      public Registrable<Split_Op, std::string, std::shared_ptr<OperatorImpl>(const Split_Op &)>,
+      public StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>> {
+
+public:
+    static const std::string Type;
+
+    Split_Op() = delete;
+
+    using Attributes_ = StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>>;
+    template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
+    Split_Op(std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
+        : OperatorTensor(Type, 2, 0, nbOutputs),
+          Attributes_(attr<SplitAttr::Axis>(axis),
+                      attr<SplitAttr::Split>(split))
+    {
+        mImpl = std::make_shared<Split_OpImpl>(*this);
+    }
+
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
+     * input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Split_Op(const Split_Op &op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Split_Op, *this, op.backend());
+        }
+        else {
+            mImpl = std::make_shared<Split_OpImpl>(*this);
+        }
+    }
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Split_Op
+     */
+    std::shared_ptr<Operator> clone() const override { return std::make_shared<Split_Op>(*this); }
+
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "split"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output_0", "data_output_n"};
+    }
+
+};
+
+/**
+ * @brief Split a Tensor into several output Tensors along a given axis.
+ * @param nbOutput Number of output Tensors produced by the split.
+ * @param axis Axis along which the input Tensor is split.
+ * @param split Size of each output slice along the axis (may be left empty).
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> A Node containing the Operator.
+ */
+inline std::shared_ptr<Node> Split(DimSize_t nbOutput,
+                                   std::int8_t axis = 0,
+                                   const std::vector<DimSize_t>& split = {},
+                                   const std::string &name = "") {
+    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
+}
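+
+// Illustrative usage sketch (values and node name are assumptions):
+//   auto splitNode = Split(/*nbOutput=*/2, /*axis=*/0, /*split=*/{}, "split_op");
+//   // splitNode wraps a Split_Op with two inputs ("data_input", "split") and
+//   // nbOutput data outputs.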
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "Axis", "Split" };
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 16ac2794a283d817f6a4e1586349e55ec626167e..31420110f19761442b67e9701aeca566976aee1b 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -26,9 +26,9 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class Transpose_OpImpl : public OperatorImpl {
+class TransposeImpl : public OperatorImpl {
 public:
-    Transpose_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    TransposeImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     void forward() override;
 };
 
@@ -47,11 +47,11 @@ class Transpose_Op : public OperatorTensor,
     template <TransposeAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    Transpose_Op(const std::vector<DimSize_t> &output_dims_order)
+    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order))
+          Attributes_(attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder))
     {
-        mImpl = std::make_shared<Transpose_OpImpl>(*this);
+        mImpl = std::make_shared<TransposeImpl>(*this);
     }
 
     /**
@@ -66,7 +66,7 @@ class Transpose_Op : public OperatorTensor,
             SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
         }
         else {
-            mImpl = std::make_shared<Transpose_OpImpl>(*this);
+            mImpl = std::make_shared<TransposeImpl>(*this);
         }
     }
 
@@ -90,9 +90,9 @@ class Transpose_Op : public OperatorTensor,
     }
 };
 
-inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &output_dims_order,
+inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
                                            const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Transpose_Op>(output_dims_order), name);
+    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
 }
 }  // namespace Aidge
 
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 3c2120565e1637697e5258723b1b366a520fdf80..005175ab613594c48959073c4674e6d69b60b29f 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -73,6 +73,10 @@ void init_Tensor(py::module& m){
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
     pyClassTensor.def(py::init<>())
+    .def(py::self + py::self)
+    .def(py::self - py::self)
+    .def(py::self * py::self)
+    .def(py::self / py::self)
     .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
@@ -89,6 +93,9 @@ void init_Tensor(py::module& m){
     .def("__str__", [](Tensor& b) {
         return b.toString();
     })
+    .def("__repr__", [](Tensor& b) {
+        return "Tensor(dtype = " + std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]) + ",\n" + b.toString() + ")";
+    })
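+    // e.g. repr(t) yields "Tensor(dtype = <dtype name>,\n<toString() output>)"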
     .def("__len__", [](Tensor& b) -> size_t{
         return b.size();
     })
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 1f588352d46e19cb5f40fc98d5f94ebd1f392dcc..103e7c1e4db6e197a1dac959a25d266e031d3e55 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -9,11 +9,11 @@
  *
  ********************************************************************************/
 
+#include <memory>
+
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Add.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
 
@@ -22,13 +22,17 @@ namespace Aidge {
 
 void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
-  .def_static("get_inputs_name", &Add_Op::getInputsName)
-  .def_static("get_outputs_name", &Add_Op::getOutputsName);
+    .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
+    .def_static("get_inputs_name", &Add_Op::getInputsName)
+    .def_static("get_outputs_name", &Add_Op::getOutputsName);
+
   declare_registrable<Add_Op>(m, "AddOp");
-  m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = "");
+
+  m.def("Add", &Add, py::arg("nb_inputs"), py::arg("name") = "");
 }
 
 void init_Add(py::module &m) {
   declare_Add(m);
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 5d72a3507c4926412b48cda42b1c3bcbe10e9460..966def88033dee8cd6cee06d80dc32114050b430 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -28,16 +28,18 @@ namespace Aidge {
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
   const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
   py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
-    m, pyClassName.c_str(),
-    py::multiple_inheritance())
-  .def(py::init<const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &>(),
-        py::arg("kernel_dims"),
-        py::arg("stride_dims"))
-  .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
-  .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
+        m, pyClassName.c_str(),
+        py::multiple_inheritance())
+    .def(py::init<const std::array<DimSize_t, DIM> &,
+                  const std::array<DimSize_t, DIM> &>(),
+            py::arg("kernel_dims"),
+            py::arg("stride_dims"))
+    .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
+    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+    .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
+
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
+
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
@@ -48,7 +50,6 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
-
 }
 
 
@@ -61,4 +62,5 @@ void init_AvgPooling(py::module &m) {
   // m.def("AvgPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&AvgPooling));
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index a12df1c8656003aef6c643b1060f21cc44111197..4ec25e02a50330bdf764b598b598836a251d65ea 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -24,18 +24,20 @@ template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
-    .def(py::init<float, float>(),
-        py::arg("epsilon"),
-        py::arg("momentum"))
-    .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-    .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
-    .def_static("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+        .def(py::init<float, float>(),
+            py::arg("epsilon"),
+            py::arg("momentum"))
+        .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+        .def_static("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
-    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
+    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nb_features"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
 
 void init_BatchNorm(py::module &m) {
     declare_BatchNormOp<2>(m);
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index b08554a36c1bb955e36764c0147e0119a53d46e7..07bb9f2fc16fcbefb693aeec00c380661f4a6e44 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -21,11 +21,16 @@ namespace Aidge {
 
 void init_Concat(py::module& m) {
     py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
-    .def_static("get_inputs_name", &Concat_Op::getInputsName)
-    .def_static("get_outputs_name", &Concat_Op::getOutputsName)
-    .def_static("attributes_name", &Concat_Op::staticGetAttrsName);
+        .def(py::init<const IOIndex_t, const DimSize_t>(),
+                py::arg("nb_inputs"),
+                py::arg("axis"))
+        .def_static("get_inputs_name", &Concat_Op::getInputsName)
+        .def_static("get_outputs_name", &Concat_Op::getOutputsName)
+        .def_static("attributes_name", &Concat_Op::staticGetAttrsName);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
-    m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
+
+    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis"), py::arg("name") = "");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 0f4b970d6c192ac4c90d7d6d4b8fe5bfa184845d..c1a4f1319e4e715add01417f86d17bddadb992f1 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -30,24 +30,27 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
-  .def(py::init<DimSize_t,
-                DimSize_t,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                bool>(),
-        py::arg("in_channels"),
-        py::arg("out_channels"),
-        py::arg("kernel_dims"),
-        py::arg("stride_dims"),
-        py::arg("dilation_dims"),
-        py::arg("no_bias"))
-    .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
-    .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
-    .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
-    ;
-  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
+        .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
+                         const std::vector<DimSize_t> &stride_dims,
+                         const std::vector<DimSize_t> &dilation_dims,
+                         bool no_bias) {
+            AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+            AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+            AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        }), py::arg("kernel_dims"),
+            py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+            py::arg("no_bias") = false)
+        .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
+        .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+        .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
+        .def("in_channels", &Conv_Op<DIM>::inChannels)
+        .def("out_channels", &Conv_Op<DIM>::outChannels)
+        ;
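+        // Expected Python-side construction for DIM == 2 (illustrative values):
+        //   op = aidge_core.ConvOp2D(kernel_dims=[3, 3], stride_dims=[1, 1],
+        //                            dilation_dims=[1, 1], no_bias=False)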
 
+  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
 
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
@@ -72,8 +75,9 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
 
 
 void init_Conv(py::module &m) {
-  declare_ConvOp<1>(m);
+//   declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
-  declare_ConvOp<3>(m);
+//   declare_ConvOp<3>(m);
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index be47f57d9bb36dc249d635bf3afb874d1df51308..ce286094d6606d8b7161acf9e3fb3c6cbcbb88c9 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -31,19 +31,19 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
-  .def(py::init<const DimSize_t,
-                const std::array<DimSize_t, DIM> &,
+  .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 bool>(),
-        py::arg("nb_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("dilation_dims"),
         py::arg("no_bias"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
-  .def_static("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
+  .def_static("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName)
+  .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels);
+
   declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                                   const std::vector<DimSize_t>& kernel_dims,
@@ -67,9 +67,9 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
 
 
 void init_ConvDepthWise(py::module &m) {
-  declare_ConvDepthWiseOp<1>(m);
+//   declare_ConvDepthWiseOp<1>(m);
   declare_ConvDepthWiseOp<2>(m);
-  declare_ConvDepthWiseOp<3>(m);
+//   declare_ConvDepthWiseOp<3>(m);
 
   // FIXME:
   // m.def("ConvDepthWise1D", static_cast<NodeAPI(*)(const char*, int, int, int const
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 2f7dbac6317f107976b9de72463a9c794c8edaf4..9dcb98a54596f32525d2880dd6e955d4643f6e7c 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -20,9 +20,11 @@ namespace Aidge {
 
 void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
-    .def_static("get_inputs_name", &Div_Op::getInputsName)
-    .def_static("get_outputs_name", &Div_Op::getOutputsName);
+        .def(py::init<>())
+        .def_static("get_inputs_name", &Div_Op::getInputsName)
+        .def_static("get_outputs_name", &Div_Op::getOutputsName);
     declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 4979ce54f72c12ff564d10ebf6fbd38aaec81e92..c248753ca8de46293d49ce4dc614ae258c313256 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -20,9 +20,12 @@ namespace Aidge {
 
 void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
-    .def_static("get_inputs_name", &Erf_Op::getInputsName)
-    .def_static("get_outputs_name", &Erf_Op::getOutputsName);
+        .def(py::init<>())
+        .def_static("get_inputs_name", &Erf_Op::getInputsName)
+        .def_static("get_outputs_name", &Erf_Op::getOutputsName);
+
     declare_registrable<Erf_Op>(m, "ErfOp");
+
     m.def("Erf", &Erf, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 185df9220bf112d729431b497b9c49af8997b7b8..6cff90d0ad3aacf4cf8a465408eb490e3f21abda 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -22,14 +22,19 @@ namespace Aidge {
 
 void declare_FC(py::module &m) {
   py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
-  .def_static("get_inputs_name", &FC_Op::getInputsName)
-  .def_static("get_outputs_name", &FC_Op::getOutputsName)
-  .def_static("attributes_name", &FC_Op::staticGetAttrsName);
+    .def(py::init<bool>(), py::arg("no_bias"))
+    .def_static("get_inputs_name", &FC_Op::getInputsName)
+    .def_static("get_outputs_name", &FC_Op::getOutputsName)
+    .def_static("attributes_name", &FC_Op::staticGetAttrsName)
+    .def("out_channels", &FC_Op::outChannels);
+
   declare_registrable<FC_Op>(m, "FCOp");
-  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
+
+  m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("no_bias") = false, py::arg("name") = "");
 }
 
 void init_FC(py::module &m) {
   declare_FC(m);
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 2c6d5f3cf0af893c12e31659f30f1059c52c3a26..83891624deede4b1f6f6f0c649358e9ed8de0a24 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -22,11 +22,19 @@ namespace Aidge {
 
 void init_Gather(py::module& m) {
     py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
-    .def_static("get_inputs_name", &Gather_Op::getInputsName)
-    .def_static("get_outputs_name", &Gather_Op::getOutputsName)
-    .def_static("attributes_name", &Gather_Op::staticGetAttrsName);
+        .def(py::init<std::int8_t,
+                      const std::vector<int64_t>,
+                      const std::vector<DimSize_t>>(),
+                py::arg("axis"),
+                py::arg("indices"),
+                py::arg("gathered_shape"))
+        .def_static("get_inputs_name", &Gather_Op::getInputsName)
+        .def_static("get_outputs_name", &Gather_Op::getOutputsName)
+        .def_static("attributes_name", &Gather_Op::staticGetAttrsName);
+
     declare_registrable<Gather_Op>(m, "GatherOp");
 
     m.def("Gather", &Gather, py::arg("axis") = 0, py::arg("indices") = std::vector<std::int64_t>(), py::arg("gathered_shape") = std::vector<std::size_t>(), py::arg("name") = "");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 897cd359a4b368dc599f37136ade3508b5ec5a76..7078ca3b0e84d7251aadbc6035e348ac9cd72571 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -9,23 +9,33 @@
  *
  ********************************************************************************/
 
+#include <stdio.h>
+
+#include <string>
+
+#include <pybind11/functional.h>
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
-#include <pybind11/functional.h>
-#include <stdio.h>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
 namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
     py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
-    .def_readonly_static("identity", &GenericOperator_Op::Identity)
-    .def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function"));
+        .def(py::init<const std::string&, IOIndex_t, IOIndex_t, IOIndex_t>(),
+                py::arg("type"),
+                py::arg("nb_data"),
+                py::arg("nb_param"),
+                py::arg("nb_outputs"))
+        .def_readonly_static("identity", &GenericOperator_Op::Identity)
+        .def("set_forward_dims", &GenericOperator_Op::setForwardDims, py::arg("computation_function"));
 
     // &GenericOperator
     m.def("GenericOperator",
diff --git a/python_binding/operator/pybind_GlobalAveragePooling.cpp b/python_binding/operator/pybind_GlobalAveragePooling.cpp
index b5f19894618fa5ee14fdc346f0b59cc87814a724..d4d2a921addaef676913cee2a16991ad36686767 100644
--- a/python_binding/operator/pybind_GlobalAveragePooling.cpp
+++ b/python_binding/operator/pybind_GlobalAveragePooling.cpp
@@ -23,9 +23,13 @@ void init_GlobalAveragePooling(py::module &m) {
   py::class_<GlobalAveragePooling_Op, std::shared_ptr<GlobalAveragePooling_Op>,
              OperatorTensor>(m, pyClassName.c_str(),
                              py::multiple_inheritance())
+      .def(py::init<>())
       .def_static("get_inputs_name", &GlobalAveragePooling_Op::getInputsName)
       .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName);
+
   declare_registrable<GlobalAveragePooling_Op>(m, pyClassName);
+
   m.def("globalaveragepooling", &GlobalAveragePooling, py::arg("name") = "");
 }
+
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
index 1ab87309358ee700182db1e7c39009cf56c45c7d..560f2889f20233ef928557aa230e6dab7f0a5d2b 100644
--- a/python_binding/operator/pybind_Identity.cpp
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -19,10 +19,12 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Identity(py::module& m) {
-    py::class_<Identity_Op, std::shared_ptr<Identity_Op>, Operator>(m, "IdentityOp", py::multiple_inheritance())
-    .def_static("get_inputs_name", &Identity_Op::getInputsName)
-    .def_static("get_outputs_name", &Identity_Op::getOutputsName);
+    py::class_<Identity_Op, std::shared_ptr<Identity_Op>, OperatorTensor>(m, "IdentityOp", py::multiple_inheritance())
+        .def(py::init<>())
+        .def_static("get_inputs_name", &Identity_Op::getInputsName)
+        .def_static("get_outputs_name", &Identity_Op::getOutputsName);
 
     m.def("Identity", &Identity, py::arg("name") = "");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 4fbef4b0dcd3b4c7128d36d1aee09225d5996b7c..b859b3be5b3dd2606d227a3ca26bd1b4eb8e75a9 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -20,10 +20,12 @@ namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
     py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
-    .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
-    .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
-    .def_static("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
+        .def(py::init<float>(), py::arg("negative_slope"))
+        .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
+        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
+        .def_static("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
+
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index f56da5a2457f249efbce84d246a6e204d3d2ecd7..09e11f89ea579b5a3aa75f177958d981c53f1dce 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -22,8 +22,9 @@ namespace Aidge {
 
 void init_MatMul(py::module &m) {
   py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
-  .def_static("get_inputs_name", &MatMul_Op::getInputsName)
-  .def_static("get_outputs_name", &MatMul_Op::getOutputsName);
+    .def(py::init<>())
+    .def_static("get_inputs_name", &MatMul_Op::getInputsName)
+    .def_static("get_outputs_name", &MatMul_Op::getOutputsName);
   declare_registrable<MatMul_Op>(m, "MatMulOp");
   m.def("MatMul", &MatMul, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 20cd3f156996c98bb64502a90ab98535f87cc2a3..ee3f85b6578054512df7b0087d1a972176cd50a3 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -47,6 +47,24 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("no_bias")= false);
+    m.def(("PaddedConvOp" + std::to_string(DIM) + "D").c_str(), [](
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims,
+                                                         bool no_bias)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+    }, py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias")= false);
 }
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
@@ -71,6 +89,24 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("no_bias") = false);
+  m.def(("PaddedConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(), [](
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims,
+                                                         bool no_bias)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+    }, py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("no_bias") = false);
 
 }
 
@@ -89,7 +125,18 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
+  m.def(("PaddedAvgPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
+        return PaddedAvgPooling_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()));
+    }, py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
 }
 
 template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
@@ -109,6 +156,20 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("ceil_mode") = false);
+  m.def(("PaddedMaxPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         bool ceil_mode)
+    {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+
+        return PaddedMaxPooling_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
+    }, py::arg("kernel_dims"),
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
+       py::arg("ceil_mode") = false);
 
 }
 
@@ -118,21 +179,24 @@ void declare_LSTMOp(py::module &m) {
        py::arg("seq_length"),
        py::arg("nobias") = false,
        py::arg("name") = "");
+  m.def("LSTMOp", &LSTM_Op,
+       py::arg("seq_length"),
+       py::arg("nobias") = false);
 }
 
 void init_MetaOperatorDefs(py::module &m) {
-  declare_PaddedConvOp<1>(m);
+//   declare_PaddedConvOp<1>(m);
   declare_PaddedConvOp<2>(m);
-  declare_PaddedConvOp<3>(m);
-  declare_PaddedConvDepthWiseOp<1>(m);
+//   declare_PaddedConvOp<3>(m);
+//   declare_PaddedConvDepthWiseOp<1>(m);
   declare_PaddedConvDepthWiseOp<2>(m);
-  declare_PaddedConvDepthWiseOp<3>(m);
-  declare_PaddedAvgPoolingOp<1>(m);
+//   declare_PaddedConvDepthWiseOp<3>(m);
+//   declare_PaddedAvgPoolingOp<1>(m);
   declare_PaddedAvgPoolingOp<2>(m);
-  declare_PaddedAvgPoolingOp<3>(m);
-  declare_PaddedMaxPoolingOp<1>(m);
+//   declare_PaddedAvgPoolingOp<3>(m);
+//   declare_PaddedMaxPoolingOp<1>(m);
   declare_PaddedMaxPoolingOp<2>(m);
-  declare_PaddedMaxPoolingOp<3>(m);
+//   declare_PaddedMaxPoolingOp<3>(m);
   declare_LSTMOp(m);
 
   py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance())
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 60f8ea7012cbf322ff34685da875065c22b44ee0..1658b0d959c0882d53e078f6d68b4474b34c739e 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -20,6 +20,7 @@ namespace Aidge {
 
 void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &Mul_Op::getInputsName)
     .def_static("get_outputs_name", &Mul_Op::getOutputsName);
     declare_registrable<Mul_Op>(m, "MulOp");
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 333e55b3daa9b661995756507e60d610a8ac84a0..d8873636d029435706cfb9766262ae0b8409d8a5 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -20,6 +20,7 @@ namespace Aidge {
 
 void init_Pop(py::module& m) {
     py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor, Attributes>(m, "PopOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &Pop_Op::getInputsName)
     .def_static("get_outputs_name", &Pop_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index c88e699413b99ff93772fc44d867d32717767155..e5d67542cd1acc5b2982081e4cf3a91948542147 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -20,6 +20,7 @@ namespace Aidge {
 
 void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &Pow_Op::getInputsName)
     .def_static("get_outputs_name", &Pow_Op::getOutputsName);
     declare_registrable<Pow_Op>(m, "PowOp");
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index f1a60f4b20987c4ef489feff2e43a9c6f03916c9..71347554fdc9cd937b1f14df16e370db2f77a267 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -35,12 +35,22 @@ void init_Producer(py::module &m) {
         m,
         "ProducerOp",
         py::multiple_inheritance())
+    .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
     .def("dims", &Producer_Op::dims)
     .def_static("get_inputs_name", &Producer_Op::getInputsName)
     .def_static("get_outputs_name", &Producer_Op::getOutputsName)
     .def_static("attributes_name", &Producer_Op::staticGetAttrsName);
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);
+
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(
+                                        const std::shared_ptr<Tensor>,
+                                        const std::string&,
+                                        bool)>(&Producer),
+                      py::arg("tensor"),
+                      py::arg("name") = "",
+                      py::arg("constant") = false);
+
     declare_registrable<Producer_Op>(m, "ProducerOp");
+
     declare_Producer<1>(m);
     declare_Producer<2>(m);
     declare_Producer<3>(m);
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index ec8f43b69c189204ae30503ea77dc17ab845c135..d611523f15a7007b0e9ab9cce323ed9a57d8ecdf 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -20,6 +20,7 @@ namespace Aidge {
 
 void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &ReLU_Op::getInputsName)
     .def_static("get_outputs_name", &ReLU_Op::getOutputsName);
     declare_registrable<ReLU_Op>(m, "ReLUOp");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 150d06419845a0ddfa6fec3da904eae94dcd5b19..00201c9bdf4ecd7ad76202c2fe78180317b736dd 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -28,6 +28,7 @@ void declare_ReduceMeanOp(py::module &m) {
   const std::string pyClassName("ReduceMeanOp");
   py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, Attributes, OperatorTensor>(
     m, pyClassName.c_str(), py::multiple_inheritance())
+    .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
     .def_static("attributes_name", &ReduceMean_Op::staticGetAttrsName)
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index e987fd9cb36471af6a7fabc26ca51a887abc6880..5a07de2f00399b761c0652e5dcdccdc0d49938de 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -20,6 +20,7 @@ namespace Aidge {
 
 void init_Reshape(py::module& m) {
     py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Attributes, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
+    .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
     .def_static("get_inputs_name", &Reshape_Op::getInputsName)
     .def_static("get_outputs_name", &Reshape_Op::getOutputsName);
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index 23534f2d86cf8cebe0483dfa017f2d2e53c00a8c..0660cdb003ed4d5946f54786c0a51d9051d83d5a 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -22,6 +22,7 @@ namespace Aidge {
 void init_Scaling(py::module& m)
 {
     py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, Attributes, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
+    .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
     .def_static("get_inputs_name", &Scaling_Op::getInputsName)
     .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
     .def_static("attributes_name", &Scaling_Op::staticGetAttrsName);
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dbae1d95d81ef65d27167bcd0774366dcc41b325
--- /dev/null
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -0,0 +1,38 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <cstdint>  // std::int64_t
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Shape.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Shape(py::module& m) {
+    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, Attributes, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance())
+        .def(py::init<std::int64_t,
+                      std::int64_t>(),
+                py::arg("start"),
+                py::arg("end"))
+        .def_static("get_inputs_name", &Shape_Op::getInputsName)
+        .def_static("get_outputs_name", &Shape_Op::getOutputsName)
+        .def_static("attributes_name", &Shape_Op::staticGetAttrsName);
+
+    declare_registrable<Shape_Op>(m, "ShapeOp");
+
+    m.def("Shape", &Shape, py::arg("start") = 0, py::arg("end") = -1, py::arg("name") = "");
+}
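+
+// Expected Python-side usage (illustrative):
+//   node = aidge_core.Shape(start=0, end=-1, name="shape")
+//   op = aidge_core.ShapeOp(start=0, end=-1)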
+
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index b9cb39dc1563f426e6885c798a157d7697db2866..0ba94c73fcd1fb435194f8485567771a147ec616 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -20,6 +20,7 @@ namespace Aidge {
 
 void init_Sigmoid(py::module& m) {
     py::class_<Sigmoid_Op, std::shared_ptr<Sigmoid_Op>, OperatorTensor>(m, "SigmoidOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &Sigmoid_Op::getInputsName)
     .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index a7ee50a2097297621c304035a7ac4a73d14d892b..b87cc8da4874c666de21a6e798a66e3c7fad9c10 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -21,6 +21,14 @@ namespace Aidge {
 
 void init_Slice(py::module& m) {
     py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance())
+    .def(py::init<const std::vector<std::int64_t>&,
+                  const std::vector<std::int64_t>&,
+                  const std::vector<std::int8_t>&,
+                  const std::vector<std::int64_t>&>(),
+                  py::arg("starts"),
+                  py::arg("ends"),
+                  py::arg("axes"),
+                  py::arg("steps"))
     .def_static("get_inputs_name", &Slice_Op::getInputsName)
     .def_static("get_outputs_name", &Slice_Op::getOutputsName);
     declare_registrable<Slice_Op>(m, "SliceOp");
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 1a50705510098eb009d47933c165750bfa9d6139..becb6f35fb7413c042f6a902aadb602e4547ee01 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -21,6 +21,7 @@ namespace Aidge {
 
 void init_Softmax(py::module& m) {
     py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
+    .def(py::init<std::size_t>(), py::arg("axis"))
     .def_static("get_inputs_name", &Softmax_Op::getInputsName)
     .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
     .def_static("attributes_name", &Softmax_Op::staticGetAttrsName);
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6efc123864f21bf8ea02008b29fe59f31685f17c
--- /dev/null
+++ b/python_binding/operator/pybind_Split.cpp
@@ -0,0 +1,38 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <cstdint>  // std::int8_t
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Split.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Split(py::module& m) {
+    py::class_<Split_Op, std::shared_ptr<Split_Op>, Attributes, OperatorTensor>(m, "SplitOp", py::multiple_inheritance())
+        .def(py::init<std::int8_t, DimSize_t, const std::vector<DimSize_t>&>(),
+                py::arg("axis"),
+                py::arg("nb_outputs"),
+                py::arg("split"))
+        .def_static("get_inputs_name", &Split_Op::getInputsName)
+        .def_static("get_outputs_name", &Split_Op::getOutputsName)
+        .def_static("attributes_name", &Split_Op::staticGetAttrsName);
+
+    declare_registrable<Split_Op>(m, "SplitOp");
+
+    m.def("Split", &Split, py::arg("nb_outputs"), py::arg("axis") = 0, py::arg("split") = std::vector<DimSize_t>(), py::arg("name") = "");
+}
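+
+// Expected Python-side usage (illustrative):
+//   node = aidge_core.Split(nb_outputs=2, axis=0, split=[], name="split")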
+
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 9efb9a668f86dc9a5340aeaa5708f7bbf205e3e4..9425eba06574c73339e8e4628ffded3449a8b4ab 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -20,6 +20,7 @@ namespace Aidge {
 
 void init_Sqrt(py::module& m) {
     py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &Sqrt_Op::getInputsName)
     .def_static("get_outputs_name", &Sqrt_Op::getOutputsName);
     declare_registrable<Sqrt_Op>(m, "SqrtOp");
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index 4d2989a1b4308bd1580ce3fc2e23d1a6be69085e..752490a72bc35ec8a0ab08dd8d51a31c887b4dc6 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -20,6 +20,7 @@ namespace Aidge {
 
 void init_Sub(py::module& m) {
     py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &Sub_Op::getInputsName)
     .def_static("get_outputs_name", &Sub_Op::getOutputsName);
     declare_registrable<Sub_Op>(m, "SubOp");
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
index f2ef989696ca28a03fb34c2a3c1814aa2a561098..74cde8dd3831c8d29ca87e2314afc27276ec025f 100644
--- a/python_binding/operator/pybind_Tanh.cpp
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -20,6 +20,7 @@ namespace Aidge {
 
 void init_Tanh(py::module& m) {
     py::class_<Tanh_Op, std::shared_ptr<Tanh_Op>, OperatorTensor>(m, "TanhOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &Tanh_Op::getInputsName)
     .def_static("get_outputs_name", &Tanh_Op::getOutputsName);
 
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 7a857c4d1f8ba569ee7ccf711f60b4dfbdfddbd0..f3c000291dfca954bbed93b9400ac0bd8df8025b 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -29,6 +29,7 @@ void declare_Transpose(py::module &m) {
   const std::string pyClassName("TransposeOp");
   py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, Attributes, OperatorTensor>(
     m, "TransposeOp", py::multiple_inheritance())
+  .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
   .def_static("get_inputs_name", &Transpose_Op::getInputsName)
   .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
   .def_static("attributes_name", &Transpose_Op::staticGetAttrsName);
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 7b38c2d72d5f4b2eed8d8bbf9f41f47144b51060..42e29fd43324d12ea4cac2c16c88a056903b7c54 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -52,9 +52,11 @@ void init_ReduceMean(py::module&);
 void init_ReLU(py::module&);
 void init_Reshape(py::module&);
 void init_Scaling(py::module&);
+void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
 void init_Slice(py::module&);
 void init_Softmax(py::module&);
+void init_Split(py::module&);
 void init_Sqrt(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
@@ -120,9 +122,11 @@ void init_Aidge(py::module& m) {
     init_ReLU(m);
     init_Reshape(m);
     init_Scaling(m);
+    init_Shape(m);
     init_Sigmoid(m);
     init_Slice(m);
     init_Softmax(m);
+    init_Split(m);
     init_Sqrt(m);
     init_Sub(m);
     init_Tanh(m);
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 2277db2421c36704270b81bdb6c45f19aaa891e4..8a5b40e44308111c5778c5260155b644234103c8 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -129,9 +129,9 @@ void Aidge::OperatorImpl::resetConsummerProducer(){
 }
 
 void Aidge::OperatorImpl::forward() {
-    AIDGE_THROW_OR_ABORT(std::runtime_error, "forward() not implemented");
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "forward() not implemented yet for operator of type {}", mOp.type());
 }
 
 void Aidge::OperatorImpl::backward() {
-    AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented");
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for operator of type {}", mOp.type());
 }
diff --git a/src/backend/TensorImpl.cpp b/src/backend/TensorImpl.cpp
index ee2f82a9cf847bfc6fe51e8d8b621e53a4c93cf4..335122d0583d355b6aab1649b5e8122c16eef15b 100644
--- a/src/backend/TensorImpl.cpp
+++ b/src/backend/TensorImpl.cpp
@@ -15,7 +15,9 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset, NbElts_t dstOffset) {
-    if (&srcImpl == this && srcOffset == dstOffset) {
+    // Return early if src and dst are the same
+    // OR if src capacity is 0 (there is no valid data to copy)
+    if ((&srcImpl == this && srcOffset == dstOffset) || srcImpl.capacity() == 0) {
         return;
     }
 
@@ -24,7 +26,7 @@ void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbE
             // Same backend, but different device
             copyFromDevice(srcImpl.rawPtr(srcOffset), srcImpl.device(), length, dstOffset);
         }
-        else if (srcImpl.hostPtr() != nullptr) {
+        else if (srcImpl.hostPtr() != nullptr) { // capacity() is > 0 so hostPtr() will not assert
             // Different backend, but input is valid on host
             copyFromHost(srcImpl.hostPtr(srcOffset), length, dstOffset);
         }
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 0bc918995c55a914b29987506578491e2c86fae5..77ca0b00c40e578f45834a16da65ae37ac4b7d3c 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -31,6 +31,7 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Memorize.hpp"
 #include "aidge/utils/Directories.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
@@ -119,9 +120,11 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
           }
         }
 
-        if (node_ptr == mRootNode || node_ptr->type() != "Producer" || showProducers) {
-          fmt::print(fp.get(), "{}_{}({}){}\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                      givenName, nodeCls);
+        if (node_ptr->type() != "Producer" || showProducers) {
+            // if (node_ptr == mRootNode) {
+            fmt::print(fp.get(), "{}_{}({}){}\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                        givenName, nodeCls);
+            // }
         }
     }
 
@@ -423,22 +426,68 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
         }
     }
 
-    // Compute dimensions of every node
-    std::set<std::shared_ptr<Node>> listNodes = getNodes();
+    // List of nodes that are already dims forwarded
+    std::set<std::shared_ptr<Node>> dimsForwarded;
+    // Establish the initial list of dims-forwardable nodes:
+    // input nodes and children of Producers
+    std::set<std::shared_ptr<Node>> listNodes = inputNodes();
+    for (const auto& nodePtr : getNodes()) {
+        if (nodePtr->type() == Producer_Op::Type) {
+            // Producers are already dims forwarded!
+            dimsForwarded.insert(nodePtr);
+            // Producer children are dims forwardable
+            for (const auto& child : nodePtr->getChildren()) {
+                if (inView(child)) {
+                    listNodes.insert(child);
+                }
+            }
+        }
+    }
+
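+    // Worklist algorithm: repeatedly forward dims for nodes whose parents are all forwarded,
+    // then enqueue their children; stop when the list is empty or when no progress is made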
     do {
         std::set<std::shared_ptr<Node>> nextList;
-        for (std::shared_ptr<Node> nodePtr : listNodes) {
+        for (const auto& nodePtr : listNodes) {
             if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
-              const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
-              // Recompute everytime, even if it was already computed in a
-              // previous call of forwardDims(), as the graph may have changed!
-              op->forwardDims(allowDataDependency);
-              if (!op->dimsForwarded()) {
-                  nextList.insert(nodePtr);
-              }
+                const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
+
+                bool anyParent = false;
+                bool parentsForwarded = true;
+                for (const auto& parent : nodePtr->getParents()) {
+                    if (parent != nullptr && inView(parent) && dimsForwarded.find(parent) == dimsForwarded.end()) {
+                        Log::debug("Dimensions not forwarded for parent (node {} (of type {})) of node {} (of type {})",
+                            parent->name(), parent->type(), nodePtr->name(), nodePtr->type());
+                        parentsForwarded = false;
+                    }
+                    else {
+                        anyParent = true;
+                    }
+                }
+
+                // Special rule for Memorize_Op, which only requires one parent
+                // to have its dims forwarded. This avoids circular dependency.
+                if (nodePtr->type() == Memorize_Op::Type && anyParent) {
+                    parentsForwarded = true;
+                }
+
+                if (parentsForwarded && op->forwardDims(allowDataDependency)) {
+                    // Dims are recomputed every time, even if they were already computed in a
+                    // previous call of forwardDims(), as the graph may have changed!
+                    dimsForwarded.insert(nodePtr);
+                    for (const auto& child : nodePtr->getChildren()) {
+                        if (inView(child) && dimsForwarded.find(child) == dimsForwarded.end()) {
+                            nextList.insert(child);
+                        }
+                    }
+                }
+                else {
+                    Log::debug("Unable to forward dimensions for node {} (of type {}) yet", nodePtr->name(), nodePtr->type());
+                    nextList.insert(nodePtr);
+                }
             }
         }
 
+        Log::debug("********************");
+
         // Internal check to make sure we won't enter in an infinite loop!
         if (nextList == listNodes) {
             // We are stuck!
@@ -450,7 +499,6 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
             Log::warn("Unable to forward dimensions (circular dependency and/or wrong dimensions and/or data dependent dimension?). Unable to compute output dims for nodes {}.", nodesName);
             return false;
         }
-
         listNodes.swap(nextList);
     }
     while (!listNodes.empty());
@@ -1412,10 +1460,9 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
 }
 
 std::shared_ptr<Aidge::GraphView> Aidge::getConnectedGraphView(std::shared_ptr<Node> node) {
-  std::vector<NodePtr> foundNodes;
-  foundNodes.push_back(node);
+  std::vector<NodePtr> foundNodes{node};
 
-  for (size_t curNodeIdx = 0; curNodeIdx < foundNodes.size(); ++curNodeIdx) {
+  for (std::size_t curNodeIdx = 0; curNodeIdx < foundNodes.size(); ++curNodeIdx) {
     NodePtr curNode = foundNodes[curNodeIdx];
 
     for (auto childs : curNode->getOrderedChildren()) {
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..99b40fcb277ce1f22c5cd3a571eaaaa4910b6ba5
--- /dev/null
+++ b/src/operator/Conv.cpp
@@ -0,0 +1,158 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Conv.hpp"
+
+#include <array>      // std::array
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
+    : OperatorTensor(op),
+      Attributes_(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    // check inputs have been associated
+    bool associated = true;
+    for (IOIndex_t i = 0; i < 3; ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        // first check weight since it defines inChannels and outChannels
+        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
+                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+        // check data
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                    (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
+                    "Wrong input size for Conv operator.");
+        // check optional bias
+        if(!this->template getAttr<ConvAttr::NoBias>())
+            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                    (getInput(2)->template dims<1>()[0] == outChannels()),
+                    "Wrong bias size for Conv operator.");
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
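+        // For each spatial dim: out = floor((in - dilation*(kernel-1) - 1) / stride) + 1
+        // (padding is not handled here; it is the role of the Pad_Op operator)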
+        for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                    (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+                                            1;
+
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
+                            static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
+        }
+
+        outputDims[1] = outChannels();
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+    }
+
+    return associated;
+}
+
+
+template <Aidge::DimIdx_t DIM>
+std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
+Aidge::Conv_Op<DIM>::computeReceptiveField(
+                          const std::vector<Aidge::DimSize_t>& firstEltDims,
+                          const std::vector<Aidge::DimSize_t>& outputDims,
+                          const Aidge::IOIndex_t outputIdx) const
+{
+    if (outputIdx != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op operator has only one output Tensor.");
+    }
+    if (firstEltDims.size() != outputDims.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the same number of dimensions as the output Tensor.");
+    }
+    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
+        // Offset
+        auto inputIdxDims = firstEltDims; // batch idx is the same
+        inputIdxDims[1] = 0; // each channel is used so start with the first one
+
+        for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+            if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+            }
+        }
+
+        // Padding is not an attribute of Conv_Op; it is handled by the Pad_Op operator
+        // Input
+        // same batch value, every input channel is used
+        std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
+        for (DimIdx_t i = 0; i < DIM; ++i) {
+            inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                        * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        + 1
+                        + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+        }
+
+        // Weight
+        // same output value, every input channel is used
+        std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
+        for (std::size_t i = 0; i < DIM; ++i) {
+            weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
+        }
+        std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+        weightIdxDims[0] = firstEltDims[1];
+
+        // Result
+        std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+
+        // Bias
+        if (! this->template getAttr<ConvAttr::NoBias>()){
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channels
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+            res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        }
+        return res;
+    }
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for weight and bias inputs
+    getInput(1)->setBackend(name, device);
+    getInput(2)->setBackend(name, device);
+}
+
+template class Aidge::Conv_Op<2>;
\ No newline at end of file
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..12aa0818b244ef0f3195de49467a464e057f2c73
--- /dev/null
+++ b/src/operator/ConvDepthWise.cpp
@@ -0,0 +1,158 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ConvDepthWise.hpp"
+
+#include <array>
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
+    : OperatorTensor(op),
+      Attributes_(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    // check inputs have been associated
+    // TODO: add a check of input dimensions?
+    bool associated = true;
+    for (IOIndex_t i = 0; i < 3; ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        // first check weight since it defines nbChannels
+        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
+                    "Wrong weight Tensor dimension: {} for ConvDepthWise{}D operator.", getInput(1)->nbDims(), DIM);
+        // check data
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                    (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
+                    "Wrong input size for ConvDepthWise operator.");
+        // check optional bias
+        if(!this->template getAttr<ConvDepthWiseAttr::NoBias>())
+            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                    (getInput(2)->template dims<1>()[0] == nbChannels()),
+                    "Wrong bias size for ConvDepthWise operator.");
+        std::array<DimSize_t, DIM + 2> outputDims = {};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
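+        // Same formula as Conv: out = floor((in - dilation*(kernel-1) - 1) / stride) + 1 per spatial dim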
+        for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                    (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
+                                            1;
+
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
+                            static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
+        }
+
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+    }
+
+    return associated;
+}
+
+
+template <Aidge::DimIdx_t DIM>
+std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
+Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
+                          const std::vector<Aidge::DimSize_t>& firstEltDims,
+                          const std::vector<Aidge::DimSize_t>& outputDims,
+                          const Aidge::IOIndex_t outputIdx) const
+{
+    if (outputIdx != 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "ConvDepthWise_Op operator has only one output Tensor.");
+    }
+    if (firstEltDims.size() != outputDims.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the same number of dimensions as the output Tensor.");
+    }
+    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
+        // Offset
+        auto inputIdxDims = firstEltDims; // batch idx is the same
+
+        for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+            if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
+            }
+        }
+
+        // Padding is not an attribute of ConvDepthWise_Op; it is handled by the Pad_Op operator
+        // Input
+        // same batch value
+        std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
+        for (DimIdx_t i = 0; i < DIM; ++i) {
+            inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                        * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        + 1
+                        + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+        }
+
+        // Weight
+        std::vector<DimSize_t> weightDims{outputDims[1], 1};
+        for (std::size_t i = 0; i < DIM; ++i) {
+            weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+        }
+        std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+        weightIdxDims[0] = firstEltDims[1];
+
+
+        // Result
+        std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+        // Bias
+        if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channels
+            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
+            res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
+        }
+        return res;
+    }
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for weight and bias inputs
+    getInput(1)->setBackend(name, device);
+    getInput(2)->setBackend(name, device);
+}
+
+template class Aidge::ConvDepthWise_Op<2>;
\ No newline at end of file
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index ba7e29e7b6543a570ceede6158bd306286037c10..d3bfd4557044c49b452de7690541a1c0a2ac62d9 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -45,8 +45,31 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
         associated &= !(getInput(i)->empty());
     }
     if (associated) {
+        // first check weight since it defines inChannels and outChannels
+        AIDGE_ASSERT((getInput(1)->nbDims() == 2),
+                    "Wrong weight Tensor dimension: {} for FC operator (should have 2 dimensions).", getInput(1)->nbDims());
+        const DimSize_t outChannels = getInput(1)->template dims<2>()[0];
+        const DimSize_t inChannels = getInput(1)->template dims<2>()[1];
+        // check data
+        const std::vector<DimSize_t>& inputDims = getInput(0)->dims();
+        if (getInput(0)->nbDims() == 1) {
+            AIDGE_ASSERT(inputDims[0] == inChannels,
+                "Wrong number of input features for input data ({}), expected {}",
+                inputDims[0], inChannels);
+        } else {
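+            // Multi-dimensional input: all non-batch dimensions are flattened into the feature dimension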
+            AIDGE_ASSERT(getInput(0)->nbDims() > 1, "FC input data must have at least one dimension");
+            const DimSize_t nbInputFeatures = std::accumulate(inputDims.cbegin() + 1, inputDims.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
+            AIDGE_ASSERT(nbInputFeatures == inChannels,
+                    "Wrong number of input features for input data ({}), expected {}",
+                    nbInputFeatures, inChannels);
+        }
+        // check optional bias
+        if(!this->template getAttr<FCAttr::NoBias>())
+            AIDGE_ASSERT((getInput(2)->nbDims() == 1) &&
+                    (getInput(2)->template dims<1>()[0] == outChannels),
+                    "Wrong bias size for FC operator.");
         // <batch, OutChannels>
-        mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
+        mOutputs[0]->resize({getInput(0)->dims()[0], outChannels});
     }
 
     return associated;
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 36ff1854703d015980a1943390eb87d0863d877f..1397b69b9c126c0e2d0ec84bf900a320b95f0d80 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -58,16 +58,6 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
 }
 
-void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Data>&& data) {
-    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
-
-    const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
-    inputOp.first->getOperator()->setInput(inputOp.second, std::forward<std::shared_ptr<Data>>(data));
-
-    // Associate inputs for custom implementation
-    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
-}
-
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
         return mImpl->getNbRequiredData(inputIdx);
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cd993f9e5cd127a005101284b78c416150b3c99a
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -0,0 +1,228 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/Identity.hpp"
+#include "aidge/operator/Concat.hpp"
+#include "aidge/operator/Tanh.hpp"
+
+namespace Aidge {
+
+std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
+                           const DimSize_t hiddenChannel,
+                           const DimSize_t seqLength,
+                           bool noBias,
+                           const std::string& name)
+{
+    // Construct micro-graph
+    auto input = Identity((!name.empty()) ? name + "_input" : "");
+    auto hiddenState = Memorize(seqLength, (!name.empty()) ? name + "_hidden_state" : "");
+    auto cellState = Memorize(seqLength, (!name.empty()) ? name + "_cell_state" : "");
+    auto add = Add(2, (!name.empty()) ? name + "_add" : "");
+
+    // Forget gate
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateX" : "");
+    input->addChild(forgetGateX, 0, 0);
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateH" : "");
+    hiddenState->addChild(forgetGateH, 1, 0);
+    auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
+    forgetGateX->addChild(forgetGate, 0, 0);
+    forgetGateH->addChild(forgetGate, 0, 1);
+    auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : "");
+    auto forgetGateMul = Mul((!name.empty()) ? name + "_forgetGateMul" : "");
+    forgetGate->addChild(forgetGateAct, 0, 0);
+    forgetGateAct->addChild(forgetGateMul, 0, 0);
+    forgetGateMul->addChild(add, 0, 0);
+    cellState->addChild(forgetGateMul, 1, 1);
+
+    // Input gate
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateX" : "");
+    input->addChild(inputGateX, 0, 0);
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateH" : "");
+    hiddenState->addChild(inputGateH, 1, 0);
+    auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
+    inputGateX->addChild(inputGate, 0, 0);
+    inputGateH->addChild(inputGate, 0, 1);
+    auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : "");
+    auto inputGateMul = Mul((!name.empty()) ? name + "_inputGateMul" : "");
+    inputGate->addChild(inputGateAct, 0, 0);
+    inputGateAct->addChild(inputGateMul, 0, 0);
+    inputGateMul->addChild(add, 0, 1);
+
+    // Candidate for cell update
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateX" : "");
+    input->addChild(cellCandidateX, 0, 0);
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateH" : "");
+    hiddenState->addChild(cellCandidateH, 1, 0);
+    auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
+    cellCandidateX->addChild(cellCandidate, 0, 0);
+    cellCandidateH->addChild(cellCandidate, 0, 1);
+    auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : "");
+    cellCandidate->addChild(cellCandidateAct, 0, 0);
+    cellCandidateAct->addChild(inputGateMul, 0, 1);
+
+    // Output gate
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateX" : "");
+    input->addChild(outputGateX, 0, 0);
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateH" : "");
+    hiddenState->addChild(outputGateH, 1, 0);
+    auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
+    outputGateX->addChild(outputGate, 0, 0);
+    outputGateH->addChild(outputGate, 0, 1);
+    auto outputGateAct = Sigmoid((!name.empty()) ? name + "_outputGateAct" : "");
+    auto outputGateMul = Mul((!name.empty()) ? name + "_outputGateMul" : "");
+    outputGate->addChild(outputGateAct, 0, 0);
+    outputGateAct->addChild(outputGateMul, 0, 0);
+
+    // Updated cell state to help determine new hidden state
+    auto cellUpdatedAct = Tanh((!name.empty()) ? name + "_cellUpdatedAct" : "");
+    add->addChild(cellUpdatedAct, 0, 0);
+    cellUpdatedAct->addChild(outputGateMul, 0, 1);
+    outputGateMul->addChild(hiddenState, 0, 0);
+    add->addChild(cellState, 0, 0);
+
+    std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
+    microGraph->add(input);
+    microGraph->add({hiddenState, cellState, add,
+        forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
+        inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
+        cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
+        outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
+        cellUpdatedAct}, false);
+
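+    // Ordered inputs: data, the 4 input weights (i, o, f, c), the 4 recurrent weights,
+    // the 4 input biases, the 4 recurrent biases, then the initial hidden and cell states
+    // (matching the Producers attached below)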
+    microGraph->setOrderedInputs({{input, 0},
+        {inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
+        {inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
+        {inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
+        {inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
+        {hiddenState, 1}, {cellState, 1}});
+    microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
+
+    auto metaOp = MetaOperator("LSTM", microGraph, name);
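+    // Attach weight and bias Producers (bias dims are 0 when noBias is set)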
+    addProducer(metaOp, 1, {hiddenChannel, inChannel}, "wi");
+    addProducer(metaOp, 2, {hiddenChannel, inChannel}, "wo");
+    addProducer(metaOp, 3, {hiddenChannel, inChannel}, "wf");
+    addProducer(metaOp, 4, {hiddenChannel, inChannel}, "wc");
+    addProducer(metaOp, 5, {hiddenChannel, hiddenChannel}, "ri");
+    addProducer(metaOp, 6, {hiddenChannel, hiddenChannel}, "ro");
+    addProducer(metaOp, 7, {hiddenChannel, hiddenChannel}, "rf");
+    addProducer(metaOp, 8, {hiddenChannel, hiddenChannel}, "rc");
+    addProducer(metaOp, 9, {(noBias ? 0 : hiddenChannel)}, "wbi");
+    addProducer(metaOp, 10, {(noBias ? 0 : hiddenChannel)}, "wbo");
+    addProducer(metaOp, 11, {(noBias ? 0 : hiddenChannel)}, "wbf");
+    addProducer(metaOp, 12, {(noBias ? 0 : hiddenChannel)}, "wbc");
+    addProducer(metaOp, 13, {(noBias ? 0 : hiddenChannel)}, "rbi");
+    addProducer(metaOp, 14, {(noBias ? 0 : hiddenChannel)}, "rbo");
+    addProducer(metaOp, 15, {(noBias ? 0 : hiddenChannel)}, "rbf");
+    addProducer(metaOp, 16, {(noBias ? 0 : hiddenChannel)}, "rbc");
+    return metaOp;
+}
+
+std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
+                                         bool noBias)
+{
+    // Construct micro-graph
+    auto input = Identity("");
+    auto hiddenState = Memorize(seqLength, "");
+    auto cellState = Memorize(seqLength, "");
+    auto add = Add(2, "");
+
+    // Forget gate
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    input->addChild(forgetGateX, 0, 0);
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    hiddenState->addChild(forgetGateH, 1, 0);
+    auto forgetGate = Add(2, "");
+    forgetGateX->addChild(forgetGate, 0, 0);
+    forgetGateH->addChild(forgetGate, 0, 1);
+    auto forgetGateAct = Sigmoid("");
+    auto forgetGateMul = Mul("");
+    forgetGate->addChild(forgetGateAct, 0, 0);
+    forgetGateAct->addChild(forgetGateMul, 0, 0);
+    forgetGateMul->addChild(add, 0, 0);
+    cellState->addChild(forgetGateMul, 1, 1);
+
+    // Input gate
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    input->addChild(inputGateX, 0, 0);
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    hiddenState->addChild(inputGateH, 1, 0);
+    auto inputGate = Add(2, "");
+    inputGateX->addChild(inputGate, 0, 0);
+    inputGateH->addChild(inputGate, 0, 1);
+    auto inputGateAct = Sigmoid("");
+    auto inputGateMul = Mul("");
+    inputGate->addChild(inputGateAct, 0, 0);
+    inputGateAct->addChild(inputGateMul, 0, 0);
+    inputGateMul->addChild(add, 0, 1);
+
+    // Candidate for cell update
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    input->addChild(cellCandidateX, 0, 0);
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    hiddenState->addChild(cellCandidateH, 1, 0);
+    auto cellCandidate = Add(2, "");
+    cellCandidateX->addChild(cellCandidate, 0, 0);
+    cellCandidateH->addChild(cellCandidate, 0, 1);
+    auto cellCandidateAct = Tanh("");
+    cellCandidate->addChild(cellCandidateAct, 0, 0);
+    cellCandidateAct->addChild(inputGateMul, 0, 1);
+
+    // Output gate
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    input->addChild(outputGateX, 0, 0);
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    hiddenState->addChild(outputGateH, 1, 0);
+    auto outputGate = Add(2,"");
+    outputGateX->addChild(outputGate, 0, 0);
+    outputGateH->addChild(outputGate, 0, 1);
+    auto outputGateAct = Sigmoid("");
+    auto outputGateMul = Mul("");
+    outputGate->addChild(outputGateAct, 0, 0);
+    outputGateAct->addChild(outputGateMul, 0, 0);
+
+    // Updated cell state to help determine new hidden state
+    auto cellUpdatedAct = Tanh("");
+    add->addChild(cellUpdatedAct, 0, 0);
+    cellUpdatedAct->addChild(outputGateMul, 0, 1);
+    outputGateMul->addChild(hiddenState, 0, 0);
+    add->addChild(cellState, 0, 0);
+
+    std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
+    microGraph->add(input);
+    microGraph->add({hiddenState, cellState, add,
+        forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
+        inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
+        cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
+        outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
+        cellUpdatedAct}, false);
+
+    microGraph->setOrderedInputs({{input, 0},
+        {inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
+        {inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
+        {inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
+        {inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
+        {hiddenState, 1}, {cellState, 1}});
+    microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
+
+    return std::make_shared<MetaOperator_Op>("LSTM", microGraph);
+}
+
+} // namespace Aidge
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ad300cd4f98b84d5ac5834370db53017958efaf6
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -0,0 +1,91 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+//////////////////////////////////
+// Node functions
+//////////////////////////////////
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<DimSize_t, DIM> &stride_dims,
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims)
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
+    });
+
+    return MetaOperator("PaddedAvgPooling", graph, name);
+}
+
+template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
+template std::shared_ptr<Node> PaddedAvgPooling<2>(const std::array<DimSize_t,2>&, const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
+template std::shared_ptr<Node> PaddedAvgPooling<3>(const std::array<DimSize_t,3>&, const std::string&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
+template std::shared_ptr<Node> PaddedAvgPooling<4>(const std::array<DimSize_t,4>&, const std::string&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <DimSize_t DIM>
+std::shared_ptr<Node> PaddedAvgPooling(const DimSize_t (&kernel_dims)[DIM],
+                                       const std::string& name,
+                                       const std::array<DimSize_t, DIM> &stride_dims,
+                                       const std::array<DimSize_t, 2*DIM> &padding_dims)
+{
+    return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+}
+
+template std::shared_ptr<Node> PaddedAvgPooling<1>(const DimSize_t (&kernel_dims)[1], const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
+template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims)[2], const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
+template std::shared_ptr<Node> PaddedAvgPooling<3>(const DimSize_t (&kernel_dims)[3], const std::string&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
+template std::shared_ptr<Node> PaddedAvgPooling<4>(const DimSize_t (&kernel_dims)[4], const std::string&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
+
+
+//////////////////////////////////
+// Operator functions
+//////////////////////////////////
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                                  const std::array<DimSize_t, DIM> &stride_dims,
+                                  const std::array<DimSize_t, 2*DIM> &padding_dims)
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, ""),
+        AvgPooling(kernel_dims, "", stride_dims)
+    });
+
+    return std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph);
+}
+
+template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
+template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<2>(const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
+template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<3>(const std::array<DimSize_t,3>&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
+template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<4>(const std::array<DimSize_t,4>&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
+
+
+} // namespace Aidge
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 25c9deb2adaca65748d7f6981de574d0a674af5d..af20c1ff4ddd71479fcc899f7fe87be1d0000c72 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -62,15 +62,6 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std:
 
 Aidge::OperatorTensor::~OperatorTensor() = default;
 
-void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Aidge::Data>&& data) {
-    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
-    if (getInput(inputIdx)) {
-        *mInputs[inputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data));
-    } else {
-        mInputs[inputIdx] = std::make_shared<Tensor>(std::move(*std::dynamic_pointer_cast<Tensor>(data)));
-    }
-}
-
 std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawInput(const Aidge::IOIndex_t inputIdx) const {
     return std::static_pointer_cast<Data>(getInput(inputIdx));
 }
@@ -88,15 +79,6 @@ void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const st
     *mOutputs[outputIdx] = *data_tensor;
 }
 
-void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) {
-    AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
-    AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
-    auto&& data_tensor = std::dynamic_pointer_cast<Tensor>(data);
-    // if (mImpl)
-    //     AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", data_tensor->getImpl()->backend(), backend());
-    *mOutputs[outputIdx] = std::move(*data_tensor);
-}
-
 std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawOutput(const Aidge::IOIndex_t outputIdx) const {
     return std::static_pointer_cast<Data>(getOutput(outputIdx));
 }
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d11cf39e1cd301d49f21863dcb1f250e96c6e502
--- /dev/null
+++ b/src/operator/Shape.cpp
@@ -0,0 +1,70 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int64_t
+#include <iterator>  // std::next
+#include <string>
+#include <vector>
+
+#include "aidge/operator/Shape.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::Shape_OpImpl::forward() {
+    const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
+    const auto start = op.template getAttr<std::int64_t>("Start");
+    const auto end = op.template getAttr<std::int64_t>("End");
+
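+    // Copy the input dimensions in the [Start, End] range (inclusive) into the
+    // output, cast to UInt64 (end - start + 1 elements)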
+    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(), 
+                                                   start),
+                                         DataType::UInt64,
+                                         end - start + 1);
+}
+
+const std::string Aidge::Shape_Op::Type = "Shape";
+
+bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
+    // check data input has been associated
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
+    }
+
+    if (getInput(0)->empty()) {
+        return false;
+    }
+
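+    // Negative Start/End indices count from the last dimension (Python-style indexing)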
+    if (this->template getAttr<std::int64_t>("Start") < 0)
+        this->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
+    if (this->template getAttr<std::int64_t>("End") < 0)
+        this->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
+
+    const auto start = this->template getAttr<std::int64_t>("Start");
+    const auto end = this->template getAttr<std::int64_t>("End");
+    const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
+    const DimSize_t roi = end - start + 1;
+
+    AIDGE_ASSERT(start < nbDims && end < nbDims, "'Start' and 'End' must be < {}", nbDims);
+    AIDGE_ASSERT(roi > 1, "Invalid ROI for Shape operator");
+
+    mOutputs[0]->resize({roi});
+    return true;
+}
+
+void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Shape_Op>::exists({name})) {
+        SET_IMPL_MACRO(Shape_Op, *this, name);
+    }
+    else {
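+        // Fall back to the backend-agnostic default implementation when no
+        // backend-specific implementation is registered for this backend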
+        mImpl = std::make_shared<Shape_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5d0493ea4da0b80bf572a33fa4ee466804d0d270
--- /dev/null
+++ b/src/operator/Split.cpp
@@ -0,0 +1,142 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Split.hpp"
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <fmt/format.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::Split_OpImpl::forward() {
+    const Split_Op& op = dynamic_cast<const Split_Op&>(mOp);
+    const auto axis = op.template getAttr<std::int8_t>("Axis");
+    const auto splits = op.template getAttr<std::vector<DimSize_t>>("Split");
+    const auto dims = op.getInput(0)->dims();
+
+    // Compute the strides before (pre) and after (post) the split axis
+    const std::size_t stride_pre = std::accumulate(dims.cbegin(), dims.cbegin() + axis, std::size_t(1), std::multiplies<std::size_t>());
+    const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + dims.size() - 1 - axis, std::size_t(1), std::multiplies<std::size_t>());
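+    // For output i: for each pre-axis index j, copy a contiguous block of
+    // splits[i] * stride_post elements starting at this chunk's offset along the split axis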
+    for (std::size_t i = 0; i < op.nbOutputs(); ++i)
+    {
+        // Use std::size_t for flat element offsets
+        const std::size_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, std::size_t(0)) * stride_post;
+        std::size_t offset = 0;
+        for (std::size_t j = 0; j < stride_pre; ++j)
+        {
+            // Compute chunk position in the input tensor
+            const std::size_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
+            // Copy the chunk into output i
+            op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
+                                             splits[i] * stride_post, offset);
+            offset += splits[i] * stride_post;
+        }
+
+    }
+}
+
+const std::string Aidge::Split_Op::Type = "Split";
+
+bool Aidge::Split_Op::dimsForwarded() const {
+    if ((getInput(1) && !getInput(1)->empty()))
+    {
+        // output dims are data dependent
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
+    // check inputs have been associated
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
+    }
+
+    if (getInput(0)->empty()) {
+        return false;
+    }
+
+    std::shared_ptr<Tensor> fallback;
+
+    if (getInput(1) && !getInput(1)->empty()) { // Split is given, replace
+        if (!this->template getAttr<SplitAttr::Split>().empty()) {
+            Log::notice("Split_Op: ignoring non-empty Split attribute because input#1 takes precedence");
+        }
+
+        if (!allowDataDependency) {
+            Log::warn("Split_Op: unable to forwardDims() because output dims are data dependent on input#1");
+            return false;
+        }
+
+        this->template getAttr<SplitAttr::Split>().reserve(getInput(1)->size());
+        const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
+        std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
+                    splits.size(),
+                    std::back_inserter(this->template getAttr<SplitAttr::Split>()));
+    }
+
+    if (this->template getAttr<std::int8_t>("Axis") < 0)
+        this->template getAttr<std::int8_t>("Axis") += static_cast<std::int8_t>(getInput(0)->nbDims());
+
+    DimSize_t dimToSplit = getInput(0)->dims()[this->template getAttr<std::int8_t>("Axis")];
+    DimSize_t nbOutput = this->nbOutputs();
+    // Fill Split attr if empty
+    if(this->template getAttr<SplitAttr::Split>().empty()) {
+        // In case the input Split is not provided, divide the dimension of Axis into equal slices
+        AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: the number of outputs {} must not exceed the size {} of the split dimension.", nbOutput, dimToSplit);
+        DimSize_t baseSliceSize = dimToSplit / nbOutput;
+
+        DimSize_t remainder = dimToSplit % nbOutput;
+
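+        // The last output receives the remainder when the axis size is not evenly divisible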
+        for (DimSize_t i = 0; i < static_cast<DimSize_t>(nbOutput -1); ++i) {
+                this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize);
+        }
+        this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize + remainder);
+    }
+
+    const auto splits = this->template getAttr<SplitAttr::Split>();
+    AIDGE_ASSERT(splits.size() == nbOutput, "Split_Op: number of slices {} must be equal to number of outputs {}", splits.size(), nbOutput);
+    const DimSize_t totalSplitSize = std::accumulate(splits.cbegin(), splits.cend(), DimSize_t(0));
+    AIDGE_ASSERT(totalSplitSize == dimToSplit, "Split_Op: Total chunks size {} is different from dimension size {}.", totalSplitSize, dimToSplit);
+
+    std::vector<DimSize_t> outDims = getInput(0)->dims();
+    for (std::size_t i = 0; i < nbOutput; ++i)
+    {
+        outDims[this->template getAttr<std::int8_t>("Axis")] = this->template getAttr<SplitAttr::Split>()[i];
+        mOutputs[i]->resize(outDims);
+    }
+
+    return true;
+}
+
+void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Split_Op>::exists({name})) {
+        SET_IMPL_MACRO(Split_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Split_OpImpl>(*this);
+    }
+    for (std::size_t i = 0; i < this->nbOutputs(); i++)
+    {
+        mOutputs[i]->setBackend(name, device);
+    }
+    
+}
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 08c4770e3fb43fe819a924dd963356401c3ce801..20b2e5a15508368a7a3ca3bbf80bd4174d98ae4e 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -23,7 +23,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-void Aidge::Transpose_OpImpl::forward() {
+void Aidge::TransposeImpl::forward() {
     const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
     const auto inputDims = op.getInput(0)->dims();
     const auto outputDims = op.getOutput(0)->dims();
@@ -83,7 +83,7 @@ void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
         SET_IMPL_MACRO(Transpose_Op, *this, name);
     }
     else {
-        mImpl = std::make_shared<Transpose_OpImpl>(*this);
+        mImpl = std::make_shared<TransposeImpl>(*this);
     }
     mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 76c15a0627ee65ed23c2dc385d9cd3787f9f0979..21009318cddae7ce60a01592b19ab237a77fbd2b 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -58,14 +58,14 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     if (convNode->type() == Conv_Op<2>::Type) {
         const std::shared_ptr<Conv_Op<2>> convOpPtr =
             std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
-        convNbOutChannels = convOpPtr->getAttr<DimSize_t>("OutChannels");
-        channelsSize = convOpPtr->getAttr<DimSize_t>("InChannels");
+        convNbOutChannels = convOpPtr->outChannels();
+        channelsSize = convOpPtr->inChannels();
         kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
     }
     else if (convNode->type() == ConvDepthWise_Op<2>::Type) {
         const std::shared_ptr<ConvDepthWise_Op<2>> convOpPtr =
             std::static_pointer_cast<ConvDepthWise_Op<2>>(convNode->getOperator());
-        convNbOutChannels = convOpPtr->getAttr<DimSize_t>("Channels");
+        convNbOutChannels = convOpPtr->nbChannels();
         kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
     }
 
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
index 6c849c54a916af10d4d926e7e0d0c339e757e01b..bb4b0e3db1974ccf106699b25fd71fc9cc09654c 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -90,7 +90,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
         fcName += "_" + addNode->name();
     }
 
-    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(outSize, bias ? false : true), fcName);
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias ? false : true), fcName);
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
diff --git a/unit_tests/operator/Test_ShapeImpl.cpp b/unit_tests/operator/Test_ShapeImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..45df89df061148fbfb892112e3c6d01edf27ffb4
--- /dev/null
+++ b/unit_tests/operator/Test_ShapeImpl.cpp
@@ -0,0 +1,86 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Shape.hpp"
+
+#include <cstdint>
+#include <memory>
+
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Shape(forward)", "[Shape][CPU]") {
+    SECTION("Default attributes") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,1,2,3,5> {
+            {
+                {
+                    {
+                        { 1,  2,  3,  4,  5},
+                        { 6,  7,  8,  9, 10},
+                        {11, 12, 13, 14, 15}
+                    },
+                    {
+                        {16, 17, 18, 19, 20},
+                        {21, 22, 23, 24, 25},
+                        {26, 27, 28, 29, 30}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,4> {
+            {1, 2, 3, 5}
+        });
+
+        std::shared_ptr<Node> myShape = Shape();
+        auto op = std::static_pointer_cast<OperatorTensor>(myShape -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myShape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+    SECTION("Using attributes") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,1,2,3,5> {
+            {
+                {
+                    {
+                        { 1,  2,  3,  4,  5},
+                        { 6,  7,  8,  9, 10},
+                        {11, 12, 13, 14, 15}
+                    },
+                    {
+                        {16, 17, 18, 19, 20},
+                        {21, 22, 23, 24, 25},
+                        {26, 27, 28, 29, 30}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,2> {
+            {2, 3}
+        });
+
+        std::shared_ptr<Node> myShape = Shape(1, 2);
+        auto op = std::static_pointer_cast<OperatorTensor>(myShape->getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myShape->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+}
\ No newline at end of file
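Judging from the two sections above, Shape() with default attributes returns the full dimension vector, while Shape(1, 2) keeps dims[1] through dims[2] inclusive ({2, 3} for a 1x2x3x5 input). A standalone sketch of that selection, offered only as an illustration under the assumption that both bounds are inclusive:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative only: the slice of the dimension vector that the test
    // expects for start = 1, end = 2 (both bounds inclusive).
    std::vector<std::int64_t> shapeSlice(const std::vector<std::int64_t>& dims,
                                         std::size_t start, std::size_t end) {
        return std::vector<std::int64_t>(dims.begin() + start, dims.begin() + end + 1);
    }
    // shapeSlice({1, 2, 3, 5}, 1, 2) == {2, 3}
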
diff --git a/unit_tests/operator/Test_SplitImpl.cpp b/unit_tests/operator/Test_SplitImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5f6f191fccbc0ee5331a9ccaf83563e169eb6abe
--- /dev/null
+++ b/unit_tests/operator/Test_SplitImpl.cpp
@@ -0,0 +1,119 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Split.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Split(forward)", "[Split][CPU]") {
+    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,1,3,7,2> {
+        {
+            {
+                {{ 1,  2},{ 3,  4},{ 5,  6},{ 7,  8},{ 9, 10},{11, 12},{13, 14}},
+                {{15, 16},{17, 18},{19, 20},{21, 22},{23, 24},{25, 26},{27, 28}},
+                {{30, 31},{32, 33},{34, 35},{36, 37},{38, 39},{40, 41},{42, 43}}
+            }
+        }
+    });
+    SECTION("Default split") {
+        std::shared_ptr<Tensor> expectedOutput0 = std::make_shared<Tensor>(Array4D<int,1,3,2,2> {
+            {
+                {
+                    {{ 1,  2},{ 3,  4}},
+                    {{15, 16},{17, 18}},
+                    {{30, 31},{32, 33}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int,1,3,2,2> {
+            {
+                {
+                    {{ 5,  6},{ 7,  8}},
+                    {{19, 20},{21, 22}},
+                    {{34, 35},{36, 37}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
+            {
+                {
+                    {{ 9, 10},{11, 12},{13, 14}},
+                    {{23, 24},{25, 26},{27, 28}},
+                    {{38, 39},{40, 41},{42, 43}}
+                }
+            }
+        });
+        auto mySplit = Split(DimSize_t(3), int8_t(2)); // Split on axis 2 into 3 outputs
+        mySplit->getOperator()->associateInput(0, input);
+        mySplit->getOperator()->setBackend("cpu");
+        mySplit->getOperator()->setDataType(DataType::Int32);
+        mySplit->forward();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(0) == *expectedOutput0);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(1) == *expectedOutput1);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(2) == *expectedOutput2);
+    }
+    SECTION("Split with different chunk size") {
+        std::shared_ptr<Tensor> expectedOutput0 = std::make_shared<Tensor>(Array4D<int,1,3,4,2> {
+            {
+                {
+                    {{ 1,  2},{ 3,  4},{ 5,  6},{ 7,  8}},
+                    {{15, 16},{17, 18},{19, 20},{21, 22}},
+                    {{30, 31},{32, 33},{34, 35},{36, 37}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int,1,3,1,2> {
+            {
+                {
+                    {{ 9, 10}},
+                    {{23, 24}},
+                    {{38, 39}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int,1,3,2,2> {
+            {
+                {
+                    {{11, 12},{13, 14}},
+                    {{25, 26},{27, 28}},
+                    {{40, 41},{42, 43}}
+                }
+            }
+        });
+        auto mySplit = Split(DimSize_t(3), int8_t(2), {DimSize_t(4), DimSize_t(1), DimSize_t(2)});
+        mySplit->getOperator()->associateInput(0, input);
+        mySplit->getOperator()->setBackend("cpu");
+        mySplit->getOperator()->setDataType(DataType::Int32);
+        mySplit->forward();
+
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(0) == *expectedOutput0);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(1) == *expectedOutput1);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(mySplit->getOperator())->getOutput(2) == *expectedOutput2);
+    }
+    SECTION("Split with bad split attribute") {
+        auto mySplit = Split(DimSize_t(3), int8_t(2), {DimSize_t(4), DimSize_t(1), DimSize_t(3)});
+        mySplit->getOperator()->associateInput(0, input);
+        mySplit->getOperator()->setBackend("cpu");
+        mySplit->getOperator()->setDataType(DataType::Int32);
+        REQUIRE_THROWS(mySplit->forward());
+    }
+    SECTION("Split with bad outNumber") {
+        auto mySplit = Split(DimSize_t(8), int8_t(2));
+        mySplit->getOperator()->associateInput(0, input);
+        mySplit->getOperator()->setBackend("cpu");
+        mySplit->getOperator()->setDataType(DataType::Int32);
+        REQUIRE_THROWS(mySplit->forward());
+    }
+}
\ No newline at end of file
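
From the expected shapes in the "Default split" section (an axis of size 7 split into 3 outputs giving chunks of 2, 2 and 3), the default chunking appears to give each output floor(axisSize / nbOutputs) elements and append the remainder to the last chunk. A small sketch of that rule, offered only as an illustration consistent with the test, not as the backend's actual implementation; note that the last two sections show the operator rejects inconsistent split sizes and an output count larger than the axis, which this sketch does not model:

    #include <cstddef>
    #include <vector>

    // Illustrative only: default chunk sizes consistent with the shapes checked
    // above; e.g. defaultSplitSizes(7, 3) == {2, 2, 3}.
    std::vector<std::size_t> defaultSplitSizes(std::size_t axisSize, std::size_t nbOutputs) {
        std::vector<std::size_t> sizes(nbOutputs, axisSize / nbOutputs);
        if (!sizes.empty()) {
            sizes.back() += axisSize % nbOutputs; // remainder goes to the last output
        }
        return sizes;
    }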