From c20cefc9aa6d8bf721baa3465646675f3e2e017a Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Tue, 30 Jul 2024 16:24:19 +0000
Subject: [PATCH] Move constructors and clone functions from operators' headers
 to src, and add src files for PaddedConv and PaddedConvDepthWise

---
 include/aidge/graph/Matching.hpp              |  52 +++++----
 include/aidge/graph/OpArgs.hpp                |   4 +
 include/aidge/operator/Add.hpp                |  16 +--
 include/aidge/operator/AvgPooling.hpp         |  11 +-
 include/aidge/operator/BatchNorm.hpp          |   6 +-
 include/aidge/operator/Cast.hpp               |   5 +-
 include/aidge/operator/Concat.hpp             |  31 +-----
 include/aidge/operator/Conv.hpp               |  13 +--
 include/aidge/operator/ConvDepthWise.hpp      |  13 +--
 include/aidge/operator/Div.hpp                |   7 +-
 include/aidge/operator/Erf.hpp                |  18 +--
 include/aidge/operator/FC.hpp                 |  15 +--
 include/aidge/operator/Fold.hpp               |  24 +---
 include/aidge/operator/Gather.hpp             |  30 +----
 include/aidge/operator/GenericOperator.hpp    |  42 ++-----
 .../aidge/operator/GlobalAveragePooling.hpp   |  20 +---
 include/aidge/operator/Identity.hpp           |  30 ++---
 include/aidge/operator/LeakyReLU.hpp          |  27 +----
 include/aidge/operator/Ln.hpp                 |  18 +--
 include/aidge/operator/MatMul.hpp             |  17 +--
 include/aidge/operator/MaxPooling.hpp         |  71 ++----------
 include/aidge/operator/Memorize.hpp           |  30 +----
 include/aidge/operator/MetaOperator.hpp       |  28 +----
 include/aidge/operator/MetaOperatorDefs.hpp   |  64 ++---------
 include/aidge/operator/Move.hpp               |  26 +----
 include/aidge/operator/Mul.hpp                |  19 +---
 include/aidge/operator/Pad.hpp                |  42 ++-----
 include/aidge/operator/Pop.hpp                |  27 +----
 include/aidge/operator/Pow.hpp                |   4 +-
 include/aidge/operator/Producer.hpp           |  44 ++------
 include/aidge/operator/ReLU.hpp               |  18 +--
 include/aidge/operator/ReduceMean.hpp         |  44 +-------
 include/aidge/operator/Reshape.hpp            |  34 +-----
 include/aidge/operator/Resize.hpp             |  29 +----
 include/aidge/operator/Scaling.hpp            |  30 +----
 include/aidge/operator/Shape.hpp              |  29 +----
 include/aidge/operator/ShiftGELU.hpp          |  20 +---
 include/aidge/operator/ShiftMax.hpp           |  20 +---
 include/aidge/operator/Sigmoid.hpp            |  29 +----
 include/aidge/operator/Slice.hpp              |  32 ++----
 include/aidge/operator/Softmax.hpp            |  25 +----
 include/aidge/operator/Split.hpp              |  30 +----
 include/aidge/operator/Sqrt.hpp               |  29 ++---
 include/aidge/operator/Sub.hpp                |  25 +----
 include/aidge/operator/Tanh.hpp               |  20 +---
 include/aidge/operator/Transpose.hpp          |  30 +----
 include/aidge/operator/Unfold.hpp             |  38 ++-----
 src/graph/Matching.cpp                        |  39 ++++++-
 src/graph/OpArgs.cpp                          |   7 +-
 src/operator/Add.cpp                          |  16 +++
 src/operator/AvgPooling.cpp                   |  19 +++-
 src/operator/BatchNorm.cpp                    |   7 +-
 src/operator/Cast.cpp                         |   4 +
 src/operator/Concat.cpp                       |  36 +++++-
 src/operator/Conv.cpp                         |  24 +++-
 src/operator/ConvDepthWise.cpp                |  23 +++-
 src/operator/Div.cpp                          |   6 +
 src/operator/Erf.cpp                          |  20 ++++
 src/operator/FC.cpp                           |  17 +++
 src/operator/Fold.cpp                         |  33 +++++-
 src/operator/Gather.cpp                       |  41 ++++++-
 src/operator/GenericOperator.cpp              |  53 +++++++++
 src/operator/GlobalAveragePooling.cpp         |  22 +++-
 src/operator/Identity.cpp                     |  27 +++++
 src/operator/LeakyReLU.cpp                    |  33 +++++-
 src/operator/Ln.cpp                           |  20 ++++
 src/operator/MatMul.cpp                       |  20 ++++
 src/operator/MaxPooling.cpp                   | 104 ++++++++++++++++++
 src/operator/Memorize.cpp                     |  33 ++++++
 src/operator/MetaOperator.cpp                 |  28 +++++
 src/operator/MetaOperatorDefs/PaddedConv.cpp  |  74 +++++++++++++
 .../MetaOperatorDefs/PaddedConvDepthWise.cpp  |  74 +++++++++++++
 src/operator/Move.cpp                         |  27 +++++
 src/operator/Mul.cpp                          |  20 ++++
 src/operator/Pad.cpp                          |  52 +++++++++
 src/operator/Pop.cpp                          |  31 ++++++
 src/operator/Pow.cpp                          |   6 +
 src/operator/Producer.cpp                     |  87 +++++++++++++++
 src/operator/ReLU.cpp                         |  20 ++++
 src/operator/ReduceMean.cpp                   |  31 ++++++
 src/operator/Reshape.cpp                      |  36 ++++++
 src/operator/Resize.cpp                       |  43 +++++++-
 src/operator/Scaling.cpp                      |  33 ++++++
 src/operator/Shape.cpp                        |  33 ++++++
 src/operator/ShiftGELU.cpp                    |  22 ++++
 src/operator/ShiftMax.cpp                     |  26 +++++
 src/operator/Sigmoid.cpp                      |  23 ++++
 src/operator/Slice.cpp                        |  46 +++++++-
 src/operator/Softmax.cpp                      |  27 +++++
 src/operator/Split.cpp                        |  42 ++++++-
 src/operator/Sqrt.cpp                         |  22 ++++
 src/operator/Sub.cpp                          |  20 ++++
 src/operator/Tanh.cpp                         |  22 ++++
 src/operator/Transpose.cpp                    |  33 ++++++
 src/operator/Unfold.cpp                       |  54 ++++++++-
 95 files changed, 1751 insertions(+), 1021 deletions(-)
 create mode 100644 src/operator/MaxPooling.cpp
 create mode 100644 src/operator/MetaOperatorDefs/PaddedConv.cpp
 create mode 100644 src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp

diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index 31bae71e9..fc8bfb335 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -29,6 +29,12 @@ namespace Aidge {
 class SinglePassGraphMatching {
 public:
     struct Context {
+        Context();
+        Context(const Context&); // explicitly define Context copy constructor
+                                 // to avoid automatic inlining
+        Context& operator=(const Context&);
+        ~Context() noexcept;
+
         std::string query;
         bool firstSequence = true;
         bool firstNode = true;
@@ -52,44 +58,36 @@ public:
         mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
         mutable NodePtr startNode;
 
-        MatchingResult() {
-            graph = std::make_shared<GraphView>();
-        }
+        MatchingResult();
 
-        MatchingResult(const MatchingResult& result) {
-            graph = std::make_shared<GraphView>(*(result.graph.get()));
-            anchors = result.anchors;
-            startNode = result.startNode;
-        }
-
-        MatchingResult& operator=(const MatchingResult& result) {
-            graph = std::make_shared<GraphView>(*(result.graph.get()));
-            anchors = result.anchors;
-            startNode = result.startNode;
-            return *this;
-        }
+        MatchingResult(const MatchingResult& other);
+        MatchingResult& operator=(const MatchingResult& other);
+        ~MatchingResult() noexcept;
     };
 
     SinglePassGraphMatching(std::shared_ptr<GraphView> graph) : mGraph(graph) {}
+    SinglePassGraphMatching(const SinglePassGraphMatching& other);
+    SinglePassGraphMatching& operator=(const SinglePassGraphMatching& other);
+    ~SinglePassGraphMatching() noexcept;
 
     /**
      * Matches a query by direct, single pass parse and match.
      * The returned matches are non-ordered and therefore stored in a std::set.
-     * 
+     *
      * Some rules:
      * - The first node of the first sequence is the root node and cannot be optional
      *   WRONG: Conv?->ReLU (will throw an error)
      *   GOOD: ReLU<-Conv?
-     * 
+     *
      * - The first node of any further sequence must be an existing anchor
      *   (the anchor cannot be in the middle of the sequence)
      *   WRONG: Conv->ReLU;Pad->Conv (will throw an error)
      *          Pad->Conv;Conv->ReLU (will throw an error)
      *   GOOD: Conv#->ReLU;Conv#<-Pad
      *         Pad->Conv#;Conv#->ReLU
-     * 
+     *
      * - Any node already matched cannot be matched again (except for anchors)
-     * 
+     *
      * - By default, an edge matches the first output to the first input.
      *   EXAMPLE: ReLU->Conv is equivalent to ReLU-0-0>Conv
      *            To match the second input, use ReLU-0-1>Conv (or ReLU-1>Conv)
@@ -97,14 +95,14 @@ public:
      *            To match any input and/or any output, use *, like ReLU-1-*>Conv
      *            or ReLU-*-0>Conv or ReLU-*-*>Conv
      *            The same is true for the "<-" edge syntax.
-     * 
+     *
      * - When several nodes could match for a given node query, the first one
-     *   not already in the matching result is matched, following the 
+     *   not already in the matching result is matched, following the
      *   childs/parents ordered node list
      *   EXAMPLE: Producer in "Conv<*-Producer" will match the weights Producer first
      *   EXAMPLE: Producer in "Conv#<1-.;Conv#<*-Producer" will match the bias Producer
      *            because the weights Producer has already been matched
-     * 
+     *
      * - One always matches a sub-graph: additional connections can exist anywhere
      *   in the matched sub-graph
      *   EXAMPLE: "Add<*-." will match the Add operator and its first input, any
@@ -112,7 +110,7 @@ public:
      *   EXAMPLE: "(Add#<*-.)+" will match the Add operator and all of its inputs
      *            Note that the anchor is required since we intend to match several
      *            inputs of the same node!
-     * 
+     *
      * - In Aidge, a node output can be connected to multiple other nodes. In
      *   your query, you can allow it or not, with the "~" or "-" modifier.
      *   EXAMPLE: "Conv->ReLU" will match the Conv that are **only** connected
@@ -121,7 +119,7 @@ public:
      *            if they are also connected to other nodes at the same output #0.
      *   When implementing a match & replace recipe, beware that you don't break
      *   branches in the middle of your matching result if you use "~"!
-     * 
+     *
      * - The matching results can be overlapping, meaning that some nodes may be
      *   found in multiple results. Some results may be subsets of other results.
      *   EXAMPLE: assume graph Conv#1->ReLU#1->Conv#2->ReLU#2
@@ -129,11 +127,11 @@ public:
      *            Conv#1->ReLU#1->Conv#2->ReLU#2 and Conv#2->ReLU#2
      *   To avoid this behavior, set the disjoint argument to true. In this case,
      *   only Conv#1->ReLU#1->Conv#2->ReLU#2 will be kept in the example above.
-     * 
+     *
      * - Whitespaces are allowed anywhere in the query
-     * 
+     *
      * QUERY = SEQ | NODE_OR_BLOCK (';' (SEQ | NODE_OR_BLOCK))*
-     * 
+     *
      * @param query The query to search.
      * @param disjoint If true, only keep the longuest disjoint (non-overlapping) matches.
      * @return Set of matches, each stored in a MatchingResult struct.
diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp
index 9d1ba6fd1..bf14d39af 100644
--- a/include/aidge/graph/OpArgs.hpp
+++ b/include/aidge/graph/OpArgs.hpp
@@ -34,6 +34,10 @@ public:
     OpArgs(const std::shared_ptr<Node>& node_)
      : mNode(node_) {assert(mNode && "The Node provided should not be a nullptr.");}
 
+    OpArgs(const OpArgs&);
+    OpArgs& operator=(const OpArgs&);
+    ~OpArgs() noexcept;
+
     inline std::shared_ptr<Node> node() const noexcept {
         return mNode;
     }
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 0e709afe9..97db47672 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -28,13 +28,7 @@ class Add_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Add_Op(const IOIndex_t nbIn)
-        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
-    {
-        if (nbIn == 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-        }
-    }
+    Add_Op(const IOIndex_t nbIn);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -46,9 +40,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Add_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Add_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -72,9 +64,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
-}
+std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 920829473..b2f4ce925 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -64,9 +64,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::AvgPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<AvgPooling_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
@@ -93,12 +91,9 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
-}
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 08d1f6a88..7f1f63c68 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -64,9 +64,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::BatchNorm_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<BatchNorm_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -103,11 +101,11 @@ std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
                                        const float epsilon = 1.0e-5F,
                                        const float momentum = 0.1F,
                                        const std::string& name = "");
+}  // namespace Aidge
 
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
-}  // namespace Aidge
 
 namespace {
 template <>
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 291669b7c..fd12f551a 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -86,9 +86,8 @@ public:
 };
 
 
-inline std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
-}
+std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "");
+
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index ab14bf527..46cd3a5a3 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -50,40 +50,19 @@ private:
 public:
     Concat_Op() = delete;
 
-    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
-        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ConcatAttr::Axis>(axis)))
-    {
-        if (nbIn == 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-        }
-        mImpl = std::make_shared<Concat_OpImpl>(*this);
-    }
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Concat_Op(const Concat_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Concat_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Concat_OpImpl>(*this);
-        }
-    }
+    Concat_Op(const Concat_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Concat_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Concat_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -100,9 +79,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
-}
+std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "");
 }
 
 namespace {
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index e89c94f96..7366472d2 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -140,22 +140,13 @@ public:
  * @return std::shared_ptr<Node> A Node containing the operator.
  */
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
+std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   DimSize_t outChannels,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                  bool noBias = false) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
-    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    if (!noBias) {
-        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
-    }
-    return conv;
-}
+                                  bool noBias = false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 1acf240bf..63d8e8419 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -105,21 +105,12 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
+std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                           bool noBias=false) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
-    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    if (!noBias) {
-        addProducer(convDW, 2, {nbChannels}, "b");
-    }
-    return convDW;
-}
+                                           bool noBias=false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 3edb4a288..b16a5e673 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -66,9 +66,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Div(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
-}
-}
+std::shared_ptr<Node> Div(const std::string& name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_DIV_H_ */
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index f615fedee..b6cc8f30c 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -35,23 +35,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Erf_Op(const Erf_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Erf_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Erf_Op(const Erf_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Erf_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Erf_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
@@ -63,9 +53,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Erf(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
-}
+std::shared_ptr<Node> Erf(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ERF_H_ */
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 01da37a05..f1996fbae 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -53,9 +53,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::FC_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<FC_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
@@ -78,15 +76,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
-    addProducer(fc, 1, {outChannels, inChannels}, "w");
-    if (!noBias) {
-        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
-    }
-    return fc;
-}
+std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index caf904e87..aebe3879b 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -67,25 +67,13 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Fold_Op(const Fold_Op<DIM> &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Fold_Op(const Fold_Op<DIM> &op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Fold_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Fold_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -106,15 +94,11 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
+std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
-    return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
-}
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Fold(
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 5f3917e48..f2e3b0fe8 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -55,39 +55,19 @@ public:
 
     Gather_Op(std::int8_t axis,
               const std::vector<int64_t>& indices,
-              const std::vector<DimSize_t>& gatheredShape)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
-        mAttributes(std::make_shared<Attributes_>(
-            attr<GatherAttr::Axis>(axis),
-            attr<GatherAttr::Indices>(indices),
-            attr<GatherAttr::GatheredShape>(gatheredShape)))
-    {
-        mImpl = std::make_shared<Gather_OpImpl>(*this);
-    }
+              const std::vector<DimSize_t>& gatheredShape);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Gather_Op(const Gather_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Gather_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Gather_OpImpl>(*this);
-        }
-    }
+    Gather_Op(const Gather_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Gather_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Gather_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -107,9 +87,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
-}
+std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 8196c4268..41516a397 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -35,43 +35,23 @@ private:
     const std::shared_ptr<DynamicAttributes> mAttributes;
 
 public:
-    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut)
-        : OperatorTensor(type, inputsCategory, nbOut)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut);
 
-    GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
-        : OperatorTensor(type, [nbData, nbParam]() {
-                                std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
-                                inputsCategory.resize(nbData + nbParam, InputCategory::Param);
-                                return inputsCategory;
-                            }(), nbOut),
-          mAttributes(std::make_shared<DynamicAttributes>())
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    GenericOperator_Op(const GenericOperator_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
-    }
+    GenericOperator_Op(const GenericOperator_Op& op);
 
-    ~GenericOperator_Op() = default;
+    ~GenericOperator_Op() noexcept;
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::GenericOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<GenericOperator_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 public:
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -111,10 +91,8 @@ public:
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
-                                             const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
-}
+std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
+                                      const std::string& name = "");
 
 /**
  * @brief Fictive custom operator not associated with any implementation.
@@ -126,10 +104,8 @@ inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std:
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
-                                             const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
-}
+std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
+                                      const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 8bb738e8b..734e12344 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -39,18 +39,9 @@ public:
 
   GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
-  GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op)
-      : OperatorTensor(op) {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-  }
+  GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op);
 
-  std::shared_ptr<Operator> clone() const override {
-    return std::make_shared<GlobalAveragePooling_Op>(*this);
-  }
+  std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -64,11 +55,8 @@ public:
   }
 };
 
-inline std::shared_ptr<Node>
-GlobalAveragePooling(const std::string &name = "") {
-  return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(),
-                                name);
-}
+std::shared_ptr<Node> GlobalAveragePooling(const std::string &name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GLOBAL_AVERAGE_POOLING_H_ */
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index e07df59d8..622d6290a 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -27,8 +27,6 @@
 
 namespace Aidge {
 
-
-
 /**
  * @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes.
  * This Operator has no Implementation, it just forward its input Tensor.
@@ -41,29 +39,20 @@ class Identity_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Identity_Op()
-        : OperatorTensor(Type, {InputCategory::Data}, 1)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    Identity_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Identity_Op(const Identity_Op& op)
-        : OperatorTensor(op)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
-    }
+    Identity_Op(const Identity_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Identity_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Identity_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
 
@@ -75,9 +64,7 @@ public:
      * @return true Input has dimensions.
      * @return false Input has no dimensions or is a nullptr.
      */
-    bool dimsForwarded() const override final {
-        return mInputs[0] ? (mInputs[0]->undefined() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
-    }
+    bool dimsForwarded() const override final;
 
 
     void forward() override final;
@@ -99,9 +86,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Identity(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
-}
+std::shared_ptr<Node> Identity(const std::string& name = "");
+
 }
 
 #endif /* AIDGE_CORE_OPERATOR_IDENTITY_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 3057b99f7..30d171eab 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -17,7 +17,6 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -54,31 +53,15 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    LeakyReLU_Op(const LeakyReLU_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    LeakyReLU_Op(const LeakyReLU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::LeakyReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<LeakyReLU_Op>(*this);
-    }
-
+    std::shared_ptr<Operator> clone() const override;
 
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
@@ -91,9 +74,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
-}
+std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
 }
 
 namespace {
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index d4010471c..c6a9ec4c8 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -36,23 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Ln_Op(const Ln_Op& op)
-        : OperatorTensor(op)
-    {
-       if (op.mImpl){
-            SET_IMPL_MACRO(Ln_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Ln_Op(const Ln_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Ln_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Ln_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -65,9 +55,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Ln(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
-}
+std::shared_ptr<Node> Ln(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_LN_H_ */
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index be460ee88..f81fb7bd0 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -36,22 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MatMul_Op(const MatMul_Op& op) : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    MatMul_Op(const MatMul_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MatMul_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<MatMul_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
     /**
      * @brief Compute dimensions for the output Tensor following the same rules as
@@ -77,9 +68,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> MatMul(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
-}
+std::shared_ptr<Node> MatMul(const std::string& name = "");
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 7e2c68681..3b7473a6a 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -20,7 +20,6 @@
 #include <stdexcept>   // std::runtime_error
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -51,71 +50,25 @@ private:
 public:
     MaxPooling_Op() = delete;
 
-    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+    MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                            bool ceil_mode = false)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<MaxPoolingAttr::StrideDims>(stride_dims),
-            attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-            attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
-        {}
+                            bool ceil_mode = false);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MaxPooling_Op(const MaxPooling_Op<DIM>& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    MaxPooling_Op(const MaxPooling_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MaxPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MaxPooling_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (inputsAssociated()) {
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-            std::function<float(float)> roundingFunction;
-            if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
-                roundingFunction = [](float x) { return std::ceil(x); };
-            } else {
-                roundingFunction = [](float x) { return std::floor(x); };
-            }
-
-            for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            roundingFunction(static_cast<float>(inputDims[dim+2] -
-                                                                    mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
-                                            static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
-            }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-            return true;
-        }
-        return false;
-    }
-
-
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
@@ -130,17 +83,15 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string MaxPooling_Op<DIM>::Type = "MaxPooling";
+extern template class Aidge::MaxPooling_Op<1>;
+extern template class Aidge::MaxPooling_Op<2>;
+extern template class Aidge::MaxPooling_Op<3>;
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           bool ceil_mode=false) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
-}
+                                           bool ceil_mode=false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index bb652e833..a1d90f06f 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -50,40 +50,20 @@ private:
 public:
     Memorize_Op() = delete;
 
-    Memorize_Op(const std::uint32_t endStep)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
-          mAttributes(std::make_shared<Attributes_>(
-                        attr<MemorizeAttr::ScheduleStep>(0),
-                        attr<MemorizeAttr::ForwardStep>(0),
-                        attr<MemorizeAttr::EndStep>(endStep)))
-    {
-        mOutputs[1] = mOutputs[0];
-    }
+    Memorize_Op(const std::uint32_t endStep);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Memorize_Op(const Memorize_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-        mOutputs[1] = mOutputs[0];
-    }
+    Memorize_Op(const Memorize_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Memorize_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Memorize_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -105,9 +85,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
-}
+std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 744564b4b..69f2120d9 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -50,7 +50,7 @@ public:
     /**
      * Set the node that should be used for the scheduling.
     */
-    void setUpperNode(std::shared_ptr<Node> node) {
+    inline void setUpperNode(std::shared_ptr<Node> node) {
         mUpperNode = node;
     }
 
@@ -58,9 +58,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MetaOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MetaOperator_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     inline const std::shared_ptr<GraphView>& getMicroGraph() const noexcept {
         return mGraph;
@@ -82,17 +80,7 @@ public:
     }
 
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        if (Registrar<MetaOperator_Op>::exists({name, type()})) {
-            // A custom implementation exists for this meta operator
-            mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
-        }
-
-        // The micro-graph should always be set to the right backend, since it
-        // shares input/output tensors.
-        // Input/output tensors backend are updated here.
-        mGraph->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
@@ -118,15 +106,9 @@ public:
 
 };
 
-inline std::shared_ptr<Node> MetaOperator(const char *type,
+std::shared_ptr<Node> MetaOperator(const char *type,
                                   const std::shared_ptr<GraphView>& graph,
-                                  const std::string& name = "")
-{
-    auto op = std::make_shared<MetaOperator_Op>(type, graph);
-    auto node = std::make_shared<Node>(op, name);
-    op->setUpperNode(node);
-    return node;
-}
+                                  const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* MetaOperator_H_ */
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 51681629c..bc3348377 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -33,43 +33,25 @@ namespace Aidge {
 
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
+extern std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                   DimSize_t out_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    // Construct micro-graph
-    auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
-
-    auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    if (!no_bias) {
-        addProducer(metaOp, 2, {out_channels}, "b");
-    }
-    return metaOp;
-}
+                                  bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
+extern std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-{
-    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
-
-    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
-}
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> PaddedConv(
+extern std::shared_ptr<Node> PaddedConv(
     DimSize_t in_channels,
     DimSize_t out_channels,
     DimSize_t const (&kernel_dims)[DIM],
@@ -77,46 +59,25 @@ inline std::shared_ptr<Node> PaddedConv(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false)
-{
-    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
-}
+    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
+std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    // Construct micro-graph
-    auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
-
-    auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
-    if (!no_bias) {
-        addProducer(metaOp, 2, {nb_channels}, "b");
-    }
-    return metaOp;
-}
+                                  bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
+std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-{
-    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
-
-    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
-}
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -127,10 +88,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false)
-{
-    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
-}
+    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index cf5a3f188..990891141 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -35,32 +35,19 @@ class Move_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Move_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {
-        mImpl = std::make_shared<Move_OpImpl>(*this);
-    }
+    Move_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Move_Op(const Move_Op& op)
-        : OperatorTensor(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
-        }
-        else {
-            mImpl = std::make_shared<Move_OpImpl>(*this);
-        }
-    }
+    Move_Op(const Move_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Move_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Move_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
@@ -72,9 +59,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Move(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
-}
-}
+std::shared_ptr<Node> Move(const std::string& name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index e61393b28..35a4b7e06 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -39,23 +39,13 @@ public:
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Mul_Op(const Mul_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Mul_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Mul_Op(const Mul_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Mul_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Mul_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -69,9 +59,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Mul(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
-}
+std::shared_ptr<Node> Mul(const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 215fafb7f..bdb5330a6 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -17,10 +17,8 @@
 #include <string>
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
@@ -70,34 +68,12 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pad_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pad_Op<DIM>>(*this);
-    }
-
+    std::shared_ptr<Operator> clone() const override;
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (inputsAssociated()) {
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
-            for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
-                                    + inputDims[dim+2]
-                                    + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
-            }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-            return true;
-        }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-        return false;
-    }
-
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
@@ -113,14 +89,10 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                                           const std::string& name = "",
-                                           const PadBorderType &borderType = PadBorderType::Constant,
-                                           double borderValue = 0.0)
-{
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
-    return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
-}
+std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
+                          const std::string& name = "",
+                          const PadBorderType &borderType = PadBorderType::Constant,
+                          double borderValue = 0.0);
 
 // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index fb3b32eea..41ab3c537 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -44,36 +44,19 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    Pop_Op()
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
-    {
-        mImpl = std::make_shared<Pop_OpImpl>(*this);
-    }
+    Pop_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Pop_Op(const Pop_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Pop_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Pop_OpImpl>(*this);
-        }
-    }
+    Pop_Op(const Pop_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pop_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pop_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -92,9 +75,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Pop(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
-}
+std::shared_ptr<Node> Pop(const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index ee5c01c21..eaf4297fd 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -66,9 +66,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Pow(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
-}
+std::shared_ptr<Node> Pow(const std::string& name = "");
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1647c563d..257a6965b 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -45,14 +45,7 @@ public:
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
-                bool constant = false)
-        : OperatorTensor(Type, {}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ProdAttr::Constant>(constant)))
-    {
-        mOutputs[0]->resize(dims);
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+                bool constant = false);
 
     /**
      * @brief Construct a new Producer_Op object from a Tensor.
@@ -82,15 +75,13 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Producer_Op(const Producer_Op&)
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Producer_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
+    inline bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
     inline bool dimsForwarded() const noexcept override final { return true; }
 
@@ -115,19 +106,11 @@ public:
         // fmt::print("Basic Producer backward() function.\n");
     }
 
-    void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const override {
-        if (mAttributes->template getAttr<ProdAttr::Constant>()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
-        }
-        OperatorTensor::setOutput(outputIdx, data);
-    }
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false) {
-  static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
-  return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
-}
+std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
@@ -135,20 +118,13 @@ inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::s
   return Producer(to_array(dims), name, constant);
 }
 
-inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false) {
-  return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
-}
+std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
-    assert(inputIdx != gk_IODefaultIndex);
-    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
-    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
-    auto prod = Producer(dims, prodName);
-    prod->addChild(otherNode, 0, inputIdx);
-    otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
-    return prod;
-}
+std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode,
+                                  const IOIndex_t inputIdx,
+                                  const std::array<DimSize_t, DIM>& dims,
+                                  const std::string& extension);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 40b5d581d..cc714c461 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -36,23 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ReLU_Op(const ReLU_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ReLU_Op(const ReLU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReLU_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -65,9 +55,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
-}
+std::shared_ptr<Node> ReLU(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 000607c60..07beb0a39 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -45,35 +45,19 @@ private:
 public:
     ReduceMean_Op() = delete;
 
-    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ReduceMeanAttr::Axes>(axes),
-            attr<ReduceMeanAttr::KeepDims>(keep_dims)))
-    {}
+    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ReduceMean_Op(const ReduceMean_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    ReduceMean_Op(const ReduceMean_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReduceMean_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReduceMean_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -101,27 +85,9 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
  */
-inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
+std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
                                         DimSize_t keep_dims=1,
-                                        const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
-    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
-
-}
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
-// template <DimSize_t DIM>
-// inline std::shared_ptr<Node> ReduceMean(
-//     std::int32_t const (&axes)[DIM],
-//     DimSize_t keep_dims = 1,
-//     const std::string& name = "") {
-//     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
-//     return ReduceMean(to_array(axes), keep_dims, name);
-// }
-
-// template <DimIdx_t DIM>
-// const std::string ReduceMean_Op::Type = "ReduceMean";
+                                        const std::string& name = "");
 
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 29a08c76c..5bd9b3e8d 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -47,38 +47,19 @@ private:
 public:
     Reshape_Op() = delete;
 
-    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ReshapeAttr::Shape>(shape),
-            attr<ReshapeAttr::AllowZero>(allowzero)))
-    {
-        mImpl = std::make_shared<Reshape_OpImpl>(*this);
-    }
+    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Reshape_Op(const Reshape_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Reshape_OpImpl>(*this);
-        }
-    }
+    Reshape_Op(const Reshape_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Reshape_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Reshape_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -97,12 +78,9 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
-                                     bool allowzero = false,
-                                   	 const std::string &name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
-}
+std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
+                            bool allowzero = false,
+                            const std::string &name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 565affc57..622a1ff1b 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -30,38 +30,20 @@ class Resize_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Resize_Op()
-        : OperatorTensor(Type,
-            {InputCategory::Data,
-                InputCategory::OptionalData,
-                InputCategory::OptionalData,
-                InputCategory::OptionalData},
-            1) {}
+    Resize_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-
-    Resize_Op(const Resize_Op& op)
-        : OperatorTensor(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Resize_Op(const Resize_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Resize_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Resize_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -77,10 +59,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Resize(const std::string &name = "") {
-
-    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
-}
+std::shared_ptr<Node> Resize(const std::string &name = "");
 
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 0683a26f6..311dc0202 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -42,36 +42,19 @@ private:
 public:
     Scaling_Op() = delete;
 
-    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ScalingAttr::ScalingFactor>(scalingFactor),
-            attr<ScalingAttr::QuantizedNbBits>(nbBits),
-            attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
-    {}
+    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Scaling_Op(const Scaling_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Scaling_Op(const Scaling_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Scaling_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Scaling_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -93,13 +76,10 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
 }
 */
-inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
+std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
                                      std::size_t quantizedNbBits=8,
                                      bool isOutputUnsigned=true,
-                                     const std::string& name = "")
-{
-    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
-}
+                                     const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 94f237726..d76a9fd06 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -49,38 +49,19 @@ private:
 public:
     Shape_Op() = delete;
 
-    Shape_Op(const std::int64_t start, const std::int64_t end)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ShapeAttr::Start>(start),
-            attr<ShapeAttr::End>(end)))
-    {
-        mImpl = std::make_shared<Shape_OpImpl>(*this);
-    }
+    Shape_Op(const std::int64_t start, const std::int64_t end);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Shape_Op(const Shape_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Shape_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Shape_OpImpl>(*this);
-        }
-    }
+    Shape_Op(const Shape_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Shape_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Shape_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -98,9 +79,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
-}
+std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 879edcac6..4d3000750 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -32,29 +32,19 @@ class ShiftGELU_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    ShiftGELU_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ShiftGELU_Op(const ShiftGELU_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ShiftGELU_Op(const ShiftGELU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ShiftGELU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ShiftGELU_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -67,9 +57,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ShiftGELU(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
-}
+std::shared_ptr<Node> ShiftGELU(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SHIFTGELU_H_ */
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index f17113021..d75e6559f 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -32,29 +32,19 @@ class ShiftMax_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    ShiftMax_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ShiftMax_Op(const ShiftMax_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ShiftMax_Op(const ShiftMax_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ShiftMax_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ShiftMax_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -67,9 +57,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ShiftMax(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
-}
+std::shared_ptr<Node> ShiftMax(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SHIFTMAX_H_ */
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index ae82d4a3a..b3204240c 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -30,30 +30,11 @@ class Sigmoid_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    Sigmoid_Op();
 
-    /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
-     */
-    Sigmoid_Op(const Sigmoid_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sigmoid_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sigmoid_Op>(*this);
-    }
+    Sigmoid_Op(const Sigmoid_Op& op);
 
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -65,9 +46,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sigmoid(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
-}
+std::shared_ptr<Node> Sigmoid(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 04a67fe98..241e165a0 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -45,14 +45,10 @@ private:
 public:
     Slice_Op() = delete;
 
-    Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes, const std::vector<std::int64_t>& steps)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<SliceAttr::Starts>(starts),
-            attr<SliceAttr::Ends>(ends),
-            attr<SliceAttr::Axes>(axes),
-            attr<SliceAttr::Steps>(steps)))
-    {}
+    Slice_Op(const std::vector<std::int64_t>& starts,
+            const std::vector<std::int64_t>& ends,
+            const std::vector<std::int8_t>& axes,
+            const std::vector<std::int64_t>& steps);
 
 
     /**
@@ -60,24 +56,14 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Slice_Op(const Slice_Op &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Slice_Op, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Slice_Op(const Slice_Op &op);
 
 public:
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Slice_Op
      */
-    std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = true) override final;
@@ -104,13 +90,11 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
+std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
                                    const std::vector<std::int64_t>& ends = {},
                                    const std::vector<std::int8_t>& axes = {},
                                    const std::vector<std::int64_t>& steps = {},
-                                   const std::string &name = "") {
-    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
-}
+                                   const std::string &name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 0b7a8e571..c221a67e3 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -42,34 +42,19 @@ private:
 public:
     Softmax_Op() = delete;
 
-    Softmax_Op(std::int32_t axis)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-                attr<SoftmaxAttr::Axis>(axis)))
-    {}
+    Softmax_Op(std::int32_t axis);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Softmax_Op(const Softmax_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    Softmax_Op(const Softmax_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Softmax_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Softmax_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -85,9 +70,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
-}
+std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 7bdec1579..661f9e32d 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -47,14 +47,7 @@ private:
 public:
     Split_Op() = delete;
 
-    Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<SplitAttr::Axis>(axis),
-            attr<SplitAttr::Split>(split)))
-    {
-        mImpl = std::make_shared<Split_OpImpl>(*this);
-    }
+    Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split);
 
 
     /**
@@ -62,23 +55,14 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Split_Op(const Split_Op &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Split_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Split_OpImpl>(*this);
-        }
-    }
+    Split_Op(const Split_Op &op);
+
 public:
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Split_Op
      */
-    std::shared_ptr<Operator> clone() const override { return std::make_shared<Split_Op>(*this); }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -103,12 +87,10 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Split(DimSize_t nbOutput,
+std::shared_ptr<Node> Split(DimSize_t nbOutput,
                                    std::int8_t axis = 0,
                                    const std::vector<DimSize_t>& split = {},
-                                   const std::string &name = "") {
-    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
-}
+                                   const std::string &name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 05b20286b..ce4aaafc9 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -14,8 +14,8 @@
 
 #include <memory>
 #include <vector>
+#include <string>
 
-#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -24,12 +24,9 @@
 namespace Aidge {
 
 class Sqrt_Op : public OperatorTensor,
-    public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
+                public Registrable<Sqrt_Op,
+                                std::string,
+                                std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     static const std::string Type;
 
@@ -39,23 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Sqrt_Op(const Sqrt_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    Sqrt_Op(const Sqrt_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sqrt_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sqrt_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -67,9 +54,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sqrt(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
-}
+std::shared_ptr<Node> Sqrt(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SQRT_H_ */
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index ba5a021c3..bb29ba678 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -26,37 +26,23 @@ namespace Aidge {
 
 class Sub_Op : public OperatorTensor,
     public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static const std::string Type;
 
+public:
     Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Sub_Op(const Sub_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sub_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Sub_Op(const Sub_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sub_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sub_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -71,9 +57,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sub(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
-}
+std::shared_ptr<Node> Sub(const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index b5f183a90..fd05bf7c4 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -28,29 +28,19 @@ class Tanh_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    Tanh_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Tanh_Op(const Tanh_Op& op)
-        : OperatorTensor(op)
-    {
-       if (op.mImpl){
-            SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Tanh_Op(const Tanh_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Tanh_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Tanh_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -63,9 +53,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Tanh(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
-}
+std::shared_ptr<Node> Tanh(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_TANH_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index efd9e1792..375d6e098 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -50,37 +50,19 @@ private:
 public:
     Transpose_Op() = delete;
 
-    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
-    {
-        mImpl = std::make_shared<TransposeImpl>(*this);
-    }
+    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Transpose_Op(const Transpose_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<TransposeImpl>(*this);
-        }
-    }
+    Transpose_Op(const Transpose_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Transpose_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Transpose_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -97,10 +79,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
-                                           const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
-}
+std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
+                                           const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 58cbcd2d7..3fda7c214 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -57,42 +57,22 @@ private:
 public:
     Unfold_Op() = delete;
 
-    constexpr Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
-                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<UnfoldAttr::StrideDims>(strideDims),
-            attr<UnfoldAttr::DilationDims>(dilationDims),
-            attr<UnfoldAttr::KernelDims>(kernelDims)))
-    {
-        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);  
-    }
+    Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
+            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Unfold_Op(const Unfold_Op<DIM> &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);  
-        }
-    }
+    Unfold_Op(const Unfold_Op<DIM> &op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Unfold_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Unfold_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -112,14 +92,10 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
+std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
-    return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
-}
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Unfold(
diff --git a/src/graph/Matching.cpp b/src/graph/Matching.cpp
index a840b6ab5..b93ac16a9 100644
--- a/src/graph/Matching.cpp
+++ b/src/graph/Matching.cpp
@@ -2,6 +2,33 @@
 
 #include <fmt/color.h>
 
+Aidge::SinglePassGraphMatching::Context::Context() = default;
+Aidge::SinglePassGraphMatching::Context::Context(const Context& other) = default;
+Aidge::SinglePassGraphMatching::Context& Aidge::SinglePassGraphMatching::Context::operator=(const Context& other) = default;
+Aidge::SinglePassGraphMatching::Context::~Context() = default;
+
+////////////////////////////////////////////////////////////
+
+Aidge::SinglePassGraphMatching::MatchingResult::MatchingResult() : graph(std::make_shared<GraphView>()), startNode(nullptr) {}
+Aidge::SinglePassGraphMatching::MatchingResult::MatchingResult(const Aidge::SinglePassGraphMatching::MatchingResult& other) {
+    graph = std::make_shared<GraphView>(*(other.graph.get()));
+    anchors = other.anchors;
+    startNode = other.startNode;
+}
+Aidge::SinglePassGraphMatching::MatchingResult& Aidge::SinglePassGraphMatching::MatchingResult::operator=(const Aidge::SinglePassGraphMatching::MatchingResult& other) {
+    graph = std::make_shared<GraphView>(*(other.graph.get()));
+    anchors = other.anchors;
+    startNode = other.startNode;
+    return *this;
+}
+Aidge::SinglePassGraphMatching::MatchingResult::~MatchingResult() noexcept = default;
+
+//////////////////////////////////////////////////////////
+
+Aidge::SinglePassGraphMatching::SinglePassGraphMatching(const Aidge::SinglePassGraphMatching& other) = default;
+Aidge::SinglePassGraphMatching& Aidge::SinglePassGraphMatching::operator=(const Aidge::SinglePassGraphMatching& other) = default;
+Aidge::SinglePassGraphMatching::~SinglePassGraphMatching() noexcept = default;
+
 std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphMatching::match(const std::string& query, bool disjoint) {
     Context ctx;
     ctx.query = query;
@@ -104,7 +131,7 @@ bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<Mat
         newCtx.query.erase(0, 1);
 
         removeWhiteSpace(newCtx.query);
-        const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return !isdigit(c); });
         if (endQuantity != newCtx.query.begin()) {
             matchQuantity = std::stoi(newCtx.query.substr(0, endQuantity - newCtx.query.begin()));
@@ -401,7 +428,7 @@ bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingRe
     // optional first IO_INDEX
     int firstIdx = 0;
     bool foundFirst = false;
-    const auto endOutputIdx = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+    const auto endOutputIdx = std::find_if(newCtx.query.begin(), newCtx.query.end(),
         [](char c) { return !isdigit(c); });
     if (endOutputIdx != newCtx.query.begin()) {
         firstIdx = std::stoi(newCtx.query.substr(0, endOutputIdx - newCtx.query.begin()));
@@ -421,7 +448,7 @@ bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingRe
         auto query = newCtx.query;
         query.erase(0, 1); // drop '-'
 
-        const auto endInputIdx = std::find_if(query.begin(), query.end(), 
+        const auto endInputIdx = std::find_if(query.begin(), query.end(),
             [](char c) { return !isdigit(c); });
         if (endInputIdx != query.begin()) {
             secondIdx = std::stoi(query.substr(0, endInputIdx - query.begin()));
@@ -500,7 +527,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     }
     else {
         // TYPE
-        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return (!isalnum(c) && c != '_'); });
 
         if (endIdentifier == newCtx.query.begin()) {
@@ -519,7 +546,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
         newCtx.query.erase(0, 1); // drop '#'
 
         // ANCHOR
-        const auto endAnchor = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endAnchor = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return (!isalnum(c) && c != '_'); });
         anchor = "#" + newCtx.query.substr(0, endAnchor - newCtx.query.begin());
         newCtx.query = newCtx.query.substr(endAnchor - newCtx.query.begin());
@@ -532,7 +559,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
         newCtx.query.erase(0, 1);
 
         // LAMBDA
-        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return (!isalnum(c) && c != '_'); });
 
         if (endIdentifier == newCtx.query.begin()) {
diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp
index e1a378c3d..cffd14c35 100644
--- a/src/graph/OpArgs.cpp
+++ b/src/graph/OpArgs.cpp
@@ -13,12 +13,15 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 
+Aidge::OpArgs::OpArgs(const OpArgs&) = default;
+Aidge::OpArgs& Aidge::OpArgs::operator=(const OpArgs&) = default;
+Aidge::OpArgs::~OpArgs() noexcept = default;
 
 std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs) {
     std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
     for (const OpArgs& elt : inputs) {
         if(elt.node() != nullptr) {
-            // Connect the first output (ordered) of each output node (ordered) 
+            // Connect the first output (ordered) of each output node (ordered)
             // to the next available input of the input node.
             AIDGE_ASSERT(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size(),
                 "Sequential(): not enough free data inputs ({}) for input node {} (of type {}) to connect to all previous output nodes ({})",
@@ -33,7 +36,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs)
             gv->add(elt.node());
         }
         else {
-            // For each input node, connect the first output (ordered) of each 
+            // For each input node, connect the first output (ordered) of each
             // output node (ordered) to the next available input
             std::set<NodePtr> connectedInputs;
             for (const auto& node_in : elt.view()->getOrderedInputs()) {
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 57ece0715..f9dc3335a 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -22,6 +22,14 @@
 
 const std::string Aidge::Add_Op::Type = "Add";
 
+Aidge::Add_Op::Add_Op(const IOIndex_t nbIn)
+    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
+{
+    if (nbIn == 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
+    }
+}
+
 Aidge::Add_Op::Add_Op(const Add_Op& op)
     : OperatorTensor(op)
 {
@@ -32,6 +40,10 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::Add_Op::clone() const {
+    return std::make_shared<Add_Op>(*this);
+}
+
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
@@ -71,4 +83,8 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(Add_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
 }
\ No newline at end of file
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 53ffb9326..db06d8486 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -26,6 +26,7 @@
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
 
+
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     : OperatorTensor(op),
@@ -38,6 +39,11 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const {
+    return std::make_shared<AvgPooling_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -110,4 +116,15 @@ void Aidge::AvgPooling_Op<DIM>::setBackend(const std::string &name, Aidge::Devic
 template class Aidge::AvgPooling_Op<1>;
 template class Aidge::AvgPooling_Op<2>;
 template class Aidge::AvgPooling_Op<3>;
-template class Aidge::AvgPooling_Op<4>;
\ No newline at end of file
+template class Aidge::AvgPooling_Op<4>;
+
+////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims) {
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", AvgPooling_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
+}
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 98e5c2da2..a81cfc132 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -38,6 +38,11 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::BatchNorm_Op<DIM>::clone() const {
+    return std::make_shared<BatchNorm_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -95,7 +100,7 @@ template class Aidge::BatchNorm_Op<3>;
 template class Aidge::BatchNorm_Op<4>;
 
 template <Aidge::DimSize_t DIM>
-inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const DimSize_t nbFeatures,
+inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFeatures,
                                        const float epsilon,
                                        const float momentum,
                                        const std::string& name) {
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 8df153a67..b6164a77c 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -46,3 +46,7 @@ void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::shared_ptr<Aidge::Node> Aidge::Cast(const Aidge::DataType targetType, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
+}
\ No newline at end of file
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 4649a954a..c78afa866 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -18,6 +18,35 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+const std::string Aidge::Concat_Op::Type = "Concat";
+
+Aidge::Concat_Op::Concat_Op(const Aidge::IOIndex_t nbIn, const std::int32_t axis)
+    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ConcatAttr::Axis>(axis)))
+{
+    if (nbIn == 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Concat operator should have at least one input.");
+    }
+    mImpl = std::make_shared<Concat_OpImpl>(*this);
+}
+
+Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Concat_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Concat_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const {
+    return std::make_shared<Concat_Op>(*this);
+}
+
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
     const DimSize_t axis = op.axis();
@@ -56,7 +85,6 @@ void Aidge::Concat_OpImpl::forward() {
     }
 }
 
-const std::string Aidge::Concat_Op::Type = "Concat";
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!inputsAssociated()) {
@@ -105,3 +133,9 @@ void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Concat(const Aidge::IOIndex_t nbIn, const std::int32_t axis, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
+}
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index a33af7877..5f9ecff94 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -158,4 +158,26 @@ void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
 }
 
 template class Aidge::Conv_Op<1>;
-template class Aidge::Conv_Op<2>;
\ No newline at end of file
+template class Aidge::Conv_Op<2>;
+
+/////////////////////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Conv(Aidge::DimSize_t inChannels,
+                                  Aidge::DimSize_t outChannels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims,
+                                  bool noBias) {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
+    return conv;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Conv<2>(Aidge::DimSize_t, Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 342fd8619..109a122d7 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -157,4 +157,25 @@ void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::De
 }
 
 template class Aidge::ConvDepthWise_Op<1>;
-template class Aidge::ConvDepthWise_Op<2>;
\ No newline at end of file
+template class Aidge::ConvDepthWise_Op<2>;
+
+////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise(const Aidge::DimSize_t nbChannels,
+                                           const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                           const std::array<Aidge::DimSize_t, DIM> &dilationDims,
+                                           bool noBias) {
+    // FIXME: properly handle default w&b initialization in every cases
+    AIDGE_ASSERT(DIM<=MaxDim,"Too many kernel dimensions required by {}, not supported", ConvDepthWise_Op<DIM>::Type);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
+    if (!noBias) {
+        addProducer(convDW, 2, {nbChannels}, "b");
+    }
+    return convDW;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise<2>(Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 387a95160..2140b17a3 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -56,3 +56,9 @@ void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Div_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+///////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Div(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index 81c87f10b..ed1f79f79 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -19,7 +19,27 @@
 
 const std::string Aidge::Erf_Op::Type = "Erf";
 
+Aidge::Erf_Op::Erf_Op(const Aidge::Erf_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Erf_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Erf_Op::clone() const {
+    return std::make_shared<Erf_Op>(*this);
+}
+
 void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Erf_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Erf(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 44d499bc7..577a1842d 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -23,6 +23,10 @@
 
 const std::string Aidge::FC_Op::Type = "FC";
 
+std::shared_ptr<Aidge::Operator> Aidge::FC_Op::clone() const {
+    return std::make_shared<FC_Op>(*this);
+}
+
 void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
     AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
@@ -86,3 +90,16 @@ void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device
         getInput(2)->setBackend(name, device);
     }
 }
+
+std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
+                                       const Aidge::DimSize_t outChannels,
+                                       bool noBias,
+                                       const std::string& name) {
+    // FIXME: properly handle default w&b initialization in every cases
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
+    addProducer(fc, 1, {outChannels, inChannels}, "w");
+    if (!noBias) {
+        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
+    }
+    return fc;
+}
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index abe73e54e..79ea0cea1 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -26,6 +26,24 @@
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Fold_Op<DIM>::Type = "Fold";
 
+template <Aidge::DimIdx_t DIM>
+Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Fold_Op<DIM>::clone() const {
+    return std::make_shared<Fold_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -64,4 +82,17 @@ void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
-template class Aidge::Fold_Op<2>;
\ No newline at end of file
+template class Aidge::Fold_Op<2>;
+
+///////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM> &outputDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims) {
+    // FIXME: properly handle default w&b initialization in every cases
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", Fold_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
+}
\ No newline at end of file
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index cd3c43574..00d471f6d 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -20,6 +20,36 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+const std::string Aidge::Gather_Op::Type = "Gather";
+
+
+Aidge::Gather_Op::Gather_Op(std::int8_t axis,
+              const std::vector<int64_t>& indices,
+              const std::vector<Aidge::DimSize_t>& gatheredShape)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<GatherAttr::Axis>(axis),
+        attr<GatherAttr::Indices>(indices),
+        attr<GatherAttr::GatheredShape>(gatheredShape)))
+{
+    mImpl = std::make_shared<Gather_OpImpl>(*this);
+}
+
+Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op)
+    : OperatorTensor(op), mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Gather_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Gather_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const {
+    return std::make_shared<Gather_Op>(*this);
+}
+
 void Aidge::Gather_OpImpl::forward() {
     const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
 
@@ -48,8 +78,6 @@ void Aidge::Gather_OpImpl::forward() {
     }
 }
 
-const std::string Aidge::Gather_Op::Type = "Gather";
-
 bool Aidge::Gather_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
@@ -113,3 +141,12 @@ void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Gather(std::int8_t axis,
+                                        const std::vector<int64_t>& indices,
+                                        const std::vector<Aidge::DimSize_t>& gatheredShape,
+                                        const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
+}
\ No newline at end of file
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index d49e1f083..e8c66085d 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -18,6 +18,42 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
+                                            const std::vector<Aidge::InputCategory>& inputsCategory,
+                                            Aidge::IOIndex_t nbOut)
+    : OperatorTensor(type, inputsCategory, nbOut)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
+                                            Aidge::IOIndex_t nbData,
+                                            Aidge::IOIndex_t nbParam,
+                                            Aidge::IOIndex_t nbOut)
+    : OperatorTensor(type, [nbData, nbParam]() {
+                            std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
+                            inputsCategory.resize(nbData + nbParam, InputCategory::Param);
+                            return inputsCategory;
+                        }(), nbOut),
+        mAttributes(std::make_shared<DynamicAttributes>())
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
+{
+    mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
+}
+
+Aidge::GenericOperator_Op::~GenericOperator_Op() noexcept = default;
+
+std::shared_ptr<Aidge::Operator> Aidge::GenericOperator_Op::clone() const {
+    return std::make_shared<GenericOperator_Op>(*this);
+}
+
 const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity
     = [](const std::vector<std::vector<std::size_t>>& inputsDims) { return inputsDims; };
 
@@ -55,3 +91,20 @@ void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t
         mOutputs[i]->setBackend(name, device);
     }
 }
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                            const std::vector<Aidge::InputCategory>& inputCategory,
+                                            Aidge::IOIndex_t nbOut,
+                                            const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
+}
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                                Aidge::IOIndex_t nbData,
+                                                Aidge::IOIndex_t nbParam,
+                                                Aidge::IOIndex_t nbOut,
+                                                const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
+}
\ No newline at end of file
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index 1632c8a76..e7b2bdffb 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -21,6 +21,20 @@
 
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
+Aidge::GlobalAveragePooling_Op::GlobalAveragePooling_Op(const Aidge::GlobalAveragePooling_Op &op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::GlobalAveragePooling_Op::clone() const {
+    return std::make_shared<GlobalAveragePooling_Op>(*this);
+}
+
 bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
@@ -41,4 +55,10 @@ bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::GlobalAveragePooling(const std::string &name) {
+  return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(), name);
+}
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index 2b8107bfc..2f60eb2fd 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -15,8 +15,35 @@
 
 const std::string Aidge::Identity_Op::Type = "Identity";
 
+Aidge::Identity_Op::Identity_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op)
+    : OperatorTensor(op)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const {
+    return std::make_shared<Identity_Op>(*this);
+}
+
+bool Aidge::Identity_Op::dimsForwarded() const {
+    const auto& input0 = getInput(0);
+    return input0 ? (input0->undefined() ? false :
+                            input0->dims() == getOutput(0)->dims()) :
+                                false;
+}
+
 void Aidge::Identity_Op::forward() {
     // Perform a shallow copy
     *(mOutputs[0]) = *(mInputs[0]);
     runHooks();
 }
+
+std::shared_ptr<Aidge::Node> Aidge::Identity(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
+}
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index 32e050ee1..9def23758 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -9,8 +9,37 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/LeakyReLU.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/LeakyReLU.hpp"
+#include "aidge/data/Tensor.hpp"
+
+const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
+
+Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::LeakyReLU_Op::clone() const {
+    return std::make_shared<LeakyReLU_Op>(*this);
+}
+
+void Aidge::LeakyReLU_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+/////////////////////////////////////
 
-const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
\ No newline at end of file
+std::shared_ptr<Aidge::Node> Aidge::LeakyReLU(float negativeSlope, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
+}
\ No newline at end of file
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
index 7e9f9ad01..61ff0dc52 100755
--- a/src/operator/Ln.cpp
+++ b/src/operator/Ln.cpp
@@ -19,7 +19,27 @@
 
 const std::string Aidge::Ln_Op::Type = "Ln";
 
+Aidge::Ln_Op::Ln_Op(const Aidge::Ln_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Ln_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Ln_Op::clone() const {
+    return std::make_shared<Ln_Op>(*this);
+}
+
 void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mImpl = Registrar<Ln_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Ln(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 207229b93..c95fe544c 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -20,6 +20,20 @@
 
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
+Aidge::MatMul_Op::MatMul_Op(const Aidge::MatMul_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::MatMul_Op::clone() const {
+    return std::make_shared<MatMul_Op>(*this);
+}
+
 bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated(false)) {
         if (getInput(0)->empty() && getInput(1)->empty()) {
@@ -82,3 +96,9 @@ void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     SET_IMPL_MACRO(MatMul_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::MatMul(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
new file mode 100644
index 000000000..85f2dd930
--- /dev/null
+++ b/src/operator/MaxPooling.cpp
@@ -0,0 +1,104 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MaxPooling.hpp"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                            const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                            bool ceil_mode)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+    attr<MaxPoolingAttr::StrideDims>(stride_dims),
+    attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+    attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
+{}
+
+template <Aidge::DimIdx_t DIM>
+Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::MaxPooling_Op<DIM>::clone() const {
+    return std::make_shared<MaxPooling_Op<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
+        std::function<float(float)> roundingFunction;
+        if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
+            roundingFunction = [](float x) { return std::ceil(x); };
+        } else {
+            roundingFunction = [](float x) { return std::floor(x); };
+        }
+
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                        roundingFunction(static_cast<float>(inputDims[dim+2] -
+                                                                mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
+                                        static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
+        }
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::MaxPooling_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+template class Aidge::MaxPooling_Op<1>;
+template class Aidge::MaxPooling_Op<2>;
+template class Aidge::MaxPooling_Op<3>;
+
+///////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::MaxPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                           bool ceil_mode)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, bool);
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 88a182f2a..f713fdaad 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -75,6 +75,33 @@ void Aidge::Memorize_OpImpl::forward() {
 
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
+Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
+        mAttributes(std::make_shared<Attributes_>(
+                    attr<MemorizeAttr::ScheduleStep>(0),
+                    attr<MemorizeAttr::ForwardStep>(0),
+                    attr<MemorizeAttr::EndStep>(endStep)))
+{
+    mOutputs[1] = mOutputs[0];
+}
+
+Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+    mOutputs[1] = mOutputs[0];
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Memorize_Op::clone() const {
+    return std::make_shared<Memorize_Op>(*this);
+}
+
+
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
     ++mAttributes->template getAttr<MemorizeAttr::ScheduleStep>();
@@ -125,3 +152,9 @@ void Aidge::Memorize_Op::forward() {
     ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
     mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Memorize(const std::uint32_t endStep, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
+}
\ No newline at end of file
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index e7c500337..71e3a4781 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -38,6 +38,10 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const {
+    return std::make_shared<MetaOperator_Op>(*this);
+}
+
 void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
     AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
@@ -59,6 +63,18 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
 }
 
+void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    if (Registrar<MetaOperator_Op>::exists({name, type()})) {
+        // A custom implementation exists for this meta operator
+        mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
+    }
+
+    // The micro-graph should always be set to the right backend, since it
+    // shares input/output tensors.
+    // Input/output tensors backend are updated here.
+    mGraph->setBackend(name, device);
+}
+
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
         return mImpl->getNbRequiredData(inputIdx);
@@ -182,3 +198,15 @@ void Aidge::MetaOperator_Op::forward() {
         mScheduler->forward(false);
     }
 }
+
+/////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
+                                  const std::shared_ptr<Aidge::GraphView>& graph,
+                                  const std::string& name)
+{
+    auto op = std::make_shared<MetaOperator_Op>(type, graph);
+    auto node = std::make_shared<Node>(op, name);
+    op->setUpperNode(node);
+    return node;
+}
\ No newline at end of file
diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp
new file mode 100644
index 000000000..fddd57d0c
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp
@@ -0,0 +1,74 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
+                                  Aidge::DimSize_t out_channels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+                                  bool no_bias)
+{
+    auto metaOp = std::make_shared<Node>(PaddedConv_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    std::static_pointer_cast<MetaOperator_Op>(metaOp->getOperator())->getMicroGraph()->setNodesName();
+    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {out_channels}, "b");
+    }
+    return metaOp;
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<2>(const Aidge::DimSize_t, const Aidge::DimSize_t, const std::array<Aidge::DimSize_t,2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op(
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
+}
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <Aidge::DimSize_t DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConv(
+    Aidge::DimSize_t in_channels,
+    Aidge::DimSize_t out_channels,
+    Aidge::DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name,
+    const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+    const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+    const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+    bool no_bias)
+{
+    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<2>(const Aidge::DimSize_t, const Aidge::DimSize_t, const Aidge::DimSize_t (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
new file mode 100644
index 000000000..2b0e12d9c
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
@@ -0,0 +1,74 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t nb_channels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+                                  bool no_bias)
+{
+    auto metaOp = std::make_shared<Node>(PaddedConvDepthWise_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    std::static_pointer_cast<MetaOperator_Op>(metaOp->getOperator())->getMicroGraph()->setNodesName();
+    addProducer(metaOp, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {nb_channels}, "b");
+    }
+    return metaOp;
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<2>(const Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op(
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
+}
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
+
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <Aidge::DimSize_t DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(
+    const Aidge::DimSize_t nb_channels,
+    Aidge::DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name,
+    const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+    const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+    const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+    bool no_bias)
+{
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<2>(const Aidge::DimSize_t, const Aidge::DimSize_t (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index 0f635ea65..4190c10a0 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -19,6 +19,27 @@ void Aidge::Move_OpImpl::forward() {
 
 const std::string Aidge::Move_Op::Type = "Move";
 
+Aidge::Move_Op::Move_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1)
+{
+    mImpl = std::make_shared<Move_OpImpl>(*this);
+}
+
+Aidge::Move_Op::Move_Op(const Aidge::Move_Op& op)
+    : OperatorTensor(op)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
+    }
+    else {
+        mImpl = std::make_shared<Move_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Move_Op::clone() const {
+    return std::make_shared<Move_Op>(*this);
+}
+
 void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Move_Op>::exists({mInputs[0]->getImpl()->backend(), name})) {
         SET_IMPL_MACRO(Move_Op, *this, {mInputs[0]->getImpl()->backend(), name});
@@ -28,3 +49,9 @@ void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Move(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
+}
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index ded67a11a..e2e32805f 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -23,6 +23,20 @@
 
 const std::string Aidge::Mul_Op::Type = "Mul";
 
+Aidge::Mul_Op::Mul_Op(const Aidge::Mul_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Mul_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Mul_Op::clone() const {
+    return std::make_shared<Mul_Op>(*this);
+}
+
 bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
@@ -56,3 +70,9 @@ void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Mul_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Mul(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index c66e6c84a..5b1428c16 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -10,10 +10,62 @@
  ********************************************************************************/
 
 #include "aidge/operator/Pad.hpp"
+
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Pad_Op<DIM>::Type = "Pad";
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const {
+    return std::make_shared<Pad_Op<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
+
+        for (std::size_t dim = 0; dim < DIM; ++dim) {
+            outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+                                + inputDims[dim+2]
+                                + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+        }
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Pad_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
 template class Aidge::Pad_Op<1>;
 template class Aidge::Pad_Op<2>;
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Pad(const std::array<Aidge::DimSize_t, 2*DIM> &beginEndTuples,
+                                           const std::string& name,
+                                           const PadBorderType &borderType,
+                                           double borderValue)
+{
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", Pad_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
\ No newline at end of file
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 2fcc46a46..5d32a06fd 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -35,8 +35,33 @@ void Aidge::Pop_OpImpl::forward() {
     *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
 }
 
+//////////////////////////////////////////////////////////
+
 const std::string Aidge::Pop_Op::Type = "Pop";
 
+Aidge::Pop_Op::Pop_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
+{
+    mImpl = std::make_shared<Pop_OpImpl>(*this);
+}
+
+Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Pop_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Pop_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Pop_Op::clone() const {
+    return std::make_shared<Pop_Op>(*this);
+}
+
 bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         auto inputDims = getInput(0)->dims();
@@ -67,3 +92,9 @@ void Aidge::Pop_Op::forward() {
     Operator::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
+
+///////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Pop(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 2a50f9c7b..1602c8c2a 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -54,4 +54,10 @@ bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Pow_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Pow(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index bdb69452e..e5c4a3e9e 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -26,6 +26,17 @@
 
 const std::string Aidge::Producer_Op::Type = "Producer";
 
+template <std::size_t DIM>
+Aidge::Producer_Op::Producer_Op(
+            const std::array<Aidge::DimSize_t, DIM>& dims,
+            bool constant)
+    : OperatorTensor(Type, {}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ProdAttr::Constant>(constant)))
+{
+    mOutputs[0]->resize(dims);
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
 
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
     : OperatorTensor(Type, {}, 1),
@@ -59,6 +70,10 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
+    return std::make_shared<Producer_Op>(*this);
+}
+
 void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Producer_Op>::exists({name})){
         SET_IMPL_MACRO(Producer_Op, *this, name);
@@ -76,3 +91,75 @@ void Aidge::Producer_Op::forward() {
 
     runHooks();
 }
+
+void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const {
+    if (mAttributes->template getAttr<ProdAttr::Constant>()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
+    }
+    OperatorTensor::setOutput(outputIdx, data);
+}
+
+/////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Producer(const std::array<Aidge::DimSize_t, DIM> &dims,
+        const std::string& name,
+        bool constant)
+{
+    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
+    return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Producer<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<4>(const std::array<Aidge::DimSize_t, 4>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<5>(const std::array<Aidge::DimSize_t, 5>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<6>(const std::array<Aidge::DimSize_t, 6>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<7>(const std::array<Aidge::DimSize_t, 7>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<8>(const std::array<Aidge::DimSize_t, 8>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<9>(const std::array<Aidge::DimSize_t, 9>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<10>(const std::array<Aidge::DimSize_t, 10>&, const std::string&, bool);
+
+std::shared_ptr<Aidge::Node> Aidge::Producer(const std::shared_ptr<Aidge::Tensor> tensor,
+            const std::string& name,
+            bool constant)
+{
+    return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
+}
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::addProducer(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, DIM>& dims,
+        const std::string& extension)
+{
+    AIDGE_ASSERT(inputIdx < gk_IODefaultIndex, "Input index too high. Cannot create Producer");
+    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
+    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
+    auto prod = Producer(dims, prodName);
+    prod->addChild(otherNode, 0, inputIdx);
+    otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
+    return prod;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<1>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 1>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<2>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 2>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<3>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 3>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<4>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 4>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<5>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 5>& dims,
+        const std::string& extension);
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index 7b945a7d6..03f9e0679 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -19,7 +19,27 @@
 
 const std::string Aidge::ReLU_Op::Type = "ReLU";
 
+Aidge::ReLU_Op::ReLU_Op(const Aidge::ReLU_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ReLU_Op::clone() const {
+    return std::make_shared<ReLU_Op>(*this);
+}
+
 void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ReLU_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+/////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ReLU(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 96f2f855f..d80525adc 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -26,6 +26,28 @@
 
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
+Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, Aidge::DimSize_t keep_dims)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ReduceMeanAttr::Axes>(axes),
+        attr<ReduceMeanAttr::KeepDims>(keep_dims)))
+{}
+
+Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ReduceMean_Op::clone() const {
+    return std::make_shared<ReduceMean_Op>(*this);
+}
+
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // make Axes attribute positive
@@ -56,4 +78,13 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(ReduceMean_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes,
+                                        Aidge::DimSize_t keep_dims,
+                                        const std::string& name) {
+    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
 }
\ No newline at end of file
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index cc31eeea7..5139a0b0c 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -28,8 +28,35 @@ void Aidge::Reshape_OpImpl::forward() {
     op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
 }
 
+//////////////////////////////////////////////////
+
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
+Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ReshapeAttr::Shape>(shape),
+        attr<ReshapeAttr::AllowZero>(allowzero)))
+{
+    mImpl = std::make_shared<Reshape_OpImpl>(*this);
+}
+
+Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Reshape_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Reshape_Op::clone() const {
+    return std::make_shared<Reshape_Op>(*this);
+}
+
 bool Aidge::Reshape_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
@@ -108,3 +135,12 @@ void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Reshape(const std::vector<std::int64_t>& shape,
+                            bool allowzero,
+                            const std::string &name)
+{
+    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
+}
\ No newline at end of file
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 0d407d4f9..f3a69848e 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -25,6 +25,35 @@
 
 const std::string Aidge::Resize_Op::Type = "Resize";
 
+Aidge::Resize_Op::Resize_Op()
+    : OperatorTensor(Type,
+        {InputCategory::Data,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData},
+        1) {}
+
+/**
+ * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+ * but not its input tensors (the new operator has no input associated).
+ * @param op Operator to copy.
+ */
+
+Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
+    : OperatorTensor(op)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const {
+    return std::make_shared<Resize_Op>(*this);
+}
+
 bool Aidge::Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
     if ((getInput(1) && !getInput(1)->undefined())
@@ -89,10 +118,10 @@ bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
             std::shared_ptr<Tensor> fallback;
             const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
 
-            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {            
+            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
                 outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
             }
-            
+
             mOutputs[0]->resize(outDims);
             return true;
         }
@@ -101,14 +130,14 @@ bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
         }
     }
 
-    return false; 
+    return false;
 }
 
 void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Resize_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 
-    // By default, automatically set backend for all inputs: roi, scales and sizes 
+    // By default, automatically set backend for all inputs: roi, scales and sizes
     if(getInput(1)) {
         getInput(1)->setBackend(name, device);
     }
@@ -119,3 +148,9 @@ void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
         getInput(3)->setBackend(name, device);
     }
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index dc5e27221..a53695b58 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -20,7 +20,40 @@
 
 const std::string Aidge::Scaling_Op::Type = "Scaling";
 
+Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ScalingAttr::ScalingFactor>(scalingFactor),
+        attr<ScalingAttr::QuantizedNbBits>(nbBits),
+        attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
+{}
+
+Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Scaling_Op::clone() const {
+    return std::make_shared<Scaling_Op>(*this);
+}
+
 void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Scaling_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Scaling(float scalingFactor,
+                                     std::size_t quantizedNbBits,
+                                     bool isOutputUnsigned,
+                                     const std::string& name)
+{
+    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor, quantizedNbBits, isOutputUnsigned), name);
 }
\ No newline at end of file
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 39f5e2fe0..f2ad10059 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -30,8 +30,35 @@ void Aidge::Shape_OpImpl::forward() {
                                          end - start + 1);
 }
 
+///////////////////////////////////////////////
+
 const std::string Aidge::Shape_Op::Type = "Shape";
 
+Aidge::Shape_Op::Shape_Op(const std::int64_t start, const std::int64_t end)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ShapeAttr::Start>(start),
+        attr<ShapeAttr::End>(end)))
+{
+    mImpl = std::make_shared<Shape_OpImpl>(*this);
+}
+
+Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Shape_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Shape_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const {
+    return std::make_shared<Shape_Op>(*this);
+}
+
 bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         if (this->start() < 0)
@@ -63,3 +90,9 @@ void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Shape(const std::int64_t start, const std::int64_t end, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
+}
\ No newline at end of file
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
index ede83e291..63480ffcc 100644
--- a/src/operator/ShiftGELU.cpp
+++ b/src/operator/ShiftGELU.cpp
@@ -21,7 +21,29 @@
 
 const std::string Aidge::ShiftGELU_Op::Type = "ShiftGELU";
 
+Aidge::ShiftGELU_Op::ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::ShiftGELU_Op::ShiftGELU_Op(const Aidge::ShiftGELU_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ShiftGELU_Op::clone() const {
+    return std::make_shared<ShiftGELU_Op>(*this);
+}
+
 void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ShiftGELU_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ShiftGELU(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
index eb77ae655..5b0dd7ace 100644
--- a/src/operator/ShiftMax.cpp
+++ b/src/operator/ShiftMax.cpp
@@ -21,7 +21,33 @@
 
 const std::string Aidge::ShiftMax_Op::Type = "ShiftMax";
 
+Aidge::ShiftMax_Op::ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::ShiftMax_Op::ShiftMax_Op(const Aidge::ShiftMax_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+/**
+ * @brief Clone the operator using its copy-constructor.
+ * @see Operator::ShiftMax_Op
+ */
+std::shared_ptr<Aidge::Operator> Aidge::ShiftMax_Op::clone() const {
+    return std::make_shared<ShiftMax_Op>(*this);
+}
+
 void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ShiftMax_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+/////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ShiftMax(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index a6edcf823..aa112378f 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -20,7 +20,30 @@
 
 const std::string Aidge::Sigmoid_Op::Type = "Sigmoid";
 
+Aidge::Sigmoid_Op::Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Sigmoid_Op::Sigmoid_Op(const Aidge::Sigmoid_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Sigmoid_Op::clone() const {
+    return std::make_shared<Sigmoid_Op>(*this);
+}
+
+
 void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Sigmoid_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+///////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sigmoid(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 4fcfd587a..bd7a4750d 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -11,7 +11,6 @@
 
 #include "aidge/operator/Slice.hpp"
 
-#include <cassert>
 #include <cstddef>
 #include <cstdint>
 #include <string>
@@ -28,6 +27,41 @@
 
 const std::string Aidge::Slice_Op::Type = "Slice";
 
+Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
+                        const std::vector<std::int64_t>& ends,
+                        const std::vector<std::int8_t>& axes,
+                        const std::vector<std::int64_t>& steps)
+    : OperatorTensor(Type,
+        {InputCategory::Data,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData},
+        1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SliceAttr::Starts>(starts),
+        attr<SliceAttr::Ends>(ends),
+        attr<SliceAttr::Axes>(axes),
+        attr<SliceAttr::Steps>(steps)))
+{}
+
+Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Slice_Op, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
+    return std::make_shared<Slice_Op>(*this);
+}
+
+
 bool Aidge::Slice_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
         || (getInput(2) && !getInput(2)->undefined())
@@ -177,3 +211,13 @@ void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     SET_IMPL_MACRO(Slice_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& starts,
+                                   const std::vector<std::int64_t>& ends,
+                                   const std::vector<std::int8_t>& axes,
+                                   const std::vector<std::int64_t>& steps,
+                                   const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
+}
\ No newline at end of file
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index 612c61b0f..f425d6fff 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -20,7 +20,34 @@
 
 const std::string Aidge::Softmax_Op::Type = "Softmax";
 
+Aidge::Softmax_Op::Softmax_Op(std::int32_t axis)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SoftmaxAttr::Axis>(axis)))
+{}
+
+Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Softmax_Op::clone() const {
+    return std::make_shared<Softmax_Op>(*this);
+}
+
 void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Softmax_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Softmax(std::int32_t axis, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
 }
\ No newline at end of file
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index af7474d8a..9c56c6a2a 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -52,8 +52,37 @@ void Aidge::Split_OpImpl::forward() {
     }
 }
 
+/////////////////////////////////////////////////////
+
 const std::string Aidge::Split_Op::Type = "Split";
 
+Aidge::Split_Op::Split_Op(std::int8_t axis,
+                        Aidge::DimSize_t nbOutputs,
+                        const std::vector<Aidge::DimSize_t>& split)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SplitAttr::Axis>(axis),
+        attr<SplitAttr::Split>(split)))
+{
+    mImpl = std::make_shared<Split_OpImpl>(*this);
+}
+
+Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Split_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Split_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Split_Op::clone() const {
+    return std::make_shared<Split_Op>(*this);
+}
+
 bool Aidge::Split_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined()))
     {
@@ -120,7 +149,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
 
         return true;
     }
-    
+
     return false;
 }
 
@@ -135,5 +164,14 @@ void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     {
         mOutputs[i]->setBackend(name, device);
     }
-    
+
 }
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Split(Aidge::DimSize_t nbOutput,
+                                   std::int8_t axis,
+                                   const std::vector<Aidge::DimSize_t>& split,
+                                   const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
+}
\ No newline at end of file
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index d8ac8b8b0..579d63b31 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -14,13 +14,35 @@
 #include <memory>
 #include <string>
 
+#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 const std::string Aidge::Sqrt_Op::Type = "Sqrt";
 
+Aidge::Sqrt_Op::Sqrt_Op(const Aidge::Sqrt_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+
+std::shared_ptr<Aidge::Operator> Aidge::Sqrt_Op::clone() const {
+    return std::make_shared<Sqrt_Op>(*this);
+}
+
 void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Sqrt_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sqrt(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 858b32bea..ee4fd5b08 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -24,6 +24,20 @@
 
 const std::string Aidge::Sub_Op::Type = "Sub";
 
+Aidge::Sub_Op::Sub_Op(const Aidge::Sub_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sub_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Sub_Op::clone() const {
+    return std::make_shared<Sub_Op>(*this);
+}
+
 bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
@@ -57,3 +71,9 @@ void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Sub_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sub(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
+}
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index c113ee6f2..1f936b6c8 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -20,7 +20,29 @@
 
 const std::string Aidge::Tanh_Op::Type = "Tanh";
 
+Aidge::Tanh_Op::Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Tanh_Op::Tanh_Op(const Aidge::Tanh_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Tanh_Op::clone() const {
+    return std::make_shared<Tanh_Op>(*this);
+}
+
 void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Tanh_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Tanh(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 30372e44f..bd1acee8a 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -28,8 +28,34 @@ void Aidge::TransposeImpl::forward() {
     op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
 }
 
+///////////////////////////////////////////////////
+
 const std::string Aidge::Transpose_Op::Type = "Transpose";
 
+Aidge::Transpose_Op::Transpose_Op(const std::vector<Aidge::DimSize_t> &outputDimsOrder)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
+{
+    mImpl = std::make_shared<TransposeImpl>(*this);
+}
+
+Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<TransposeImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const {
+    return std::make_shared<Transpose_Op>(*this);
+}
+
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars.");
@@ -52,3 +78,10 @@ void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
+                                           const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
+}
\ No newline at end of file
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 94c970fd3..2b12f3358 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -65,9 +65,44 @@ void Aidge::Unfold_OpImpl<DIM>::forward() {
     }
 }
 
+template class Aidge::Unfold_OpImpl<2>;
+
+/////////////////////////////////////////////////////////////
+
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
 
+template <Aidge::DimIdx_t DIM>
+Aidge::Unfold_Op<DIM>::Unfold_Op(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                    const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                    const std::array<Aidge::DimSize_t, DIM> &dilationDims)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<UnfoldAttr::StrideDims>(strideDims),
+        attr<UnfoldAttr::DilationDims>(dilationDims),
+        attr<UnfoldAttr::KernelDims>(kernelDims)))
+{
+    mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Unfold_Op<DIM>::clone() const {
+    return std::make_shared<Unfold_Op>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -103,5 +138,20 @@ void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx
     mOutputs[0]->setBackend(name, device);
 }
 
-template class Aidge::Unfold_OpImpl<2>;
-template class Aidge::Unfold_Op<2>;
\ No newline at end of file
+template class Aidge::Unfold_Op<2>;
+
+///////////////////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Unfold(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
+    return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Unfold<2>(const std::array<Aidge::DimSize_t, 2>&,
+                                  const std::string&,
+                                  const std::array<Aidge::DimSize_t, 2>&,
+                                  const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
-- 
GitLab