diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index 31bae71e9b433d1b82ffe62d93837f440c8a936f..fc8bfb3353352186b23459e1ca82505827c28345 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -29,6 +29,12 @@ namespace Aidge {
 class SinglePassGraphMatching {
 public:
     struct Context {
+        Context();
+        Context(const Context&); // explicitly define Context copy constructor
+                                 // to avoid automatic inlining
+        Context& operator=(const Context&);
+        ~Context() noexcept;
+
         std::string query;
         bool firstSequence = true;
         bool firstNode = true;
@@ -52,44 +58,36 @@ public:
         mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
         mutable NodePtr startNode;
 
-        MatchingResult() {
-            graph = std::make_shared<GraphView>();
-        }
+        MatchingResult();
 
-        MatchingResult(const MatchingResult& result) {
-            graph = std::make_shared<GraphView>(*(result.graph.get()));
-            anchors = result.anchors;
-            startNode = result.startNode;
-        }
-
-        MatchingResult& operator=(const MatchingResult& result) {
-            graph = std::make_shared<GraphView>(*(result.graph.get()));
-            anchors = result.anchors;
-            startNode = result.startNode;
-            return *this;
-        }
+        MatchingResult(const MatchingResult& other);
+        MatchingResult& operator=(const MatchingResult& other);
+        ~MatchingResult() noexcept;
     };
 
     SinglePassGraphMatching(std::shared_ptr<GraphView> graph) : mGraph(graph) {}
+    SinglePassGraphMatching(const SinglePassGraphMatching& other);
+    SinglePassGraphMatching& operator=(const SinglePassGraphMatching& other);
+    ~SinglePassGraphMatching() noexcept;
 
     /**
      * Matches a query by direct, single pass parse and match.
      * The returned matches are non-ordered and therefore stored in a std::set.
-     * 
+     *
      * Some rules:
      * - The first node of the first sequence is the root node and cannot be optional
      *   WRONG: Conv?->ReLU (will throw an error)
      *   GOOD: ReLU<-Conv?
-     * 
+     *
      * - The first node of any further sequence must be an existing anchor
      *   (the anchor cannot be in the middle of the sequence)
      *   WRONG: Conv->ReLU;Pad->Conv (will throw an error)
      *          Pad->Conv;Conv->ReLU (will throw an error)
      *   GOOD: Conv#->ReLU;Conv#<-Pad
      *         Pad->Conv#;Conv#->ReLU
-     * 
+     *
      * - Any node already matched cannot be matched again (except for anchors)
-     * 
+     *
      * - By default, an edge matches the first output to the first input.
      *   EXAMPLE: ReLU->Conv is equivalent to ReLU-0-0>Conv
      *            To match the second input, use ReLU-0-1>Conv (or ReLU-1>Conv)
@@ -97,14 +95,14 @@ public:
      *            To match any input and/or any output, use *, like ReLU-1-*>Conv
      *            or ReLU-*-0>Conv or ReLU-*-*>Conv
      *            The same is true for the "<-" edge syntax.
-     * 
+     *
      * - When several nodes could match for a given node query, the first one
-     *   not already in the matching result is matched, following the 
+     *   not already in the matching result is matched, following the
      *   childs/parents ordered node list
      *   EXAMPLE: Producer in "Conv<*-Producer" will match the weights Producer first
      *   EXAMPLE: Producer in "Conv#<1-.;Conv#<*-Producer" will match the bias Producer
      *            because the weights Producer has already been matched
-     * 
+     *
      * - One always matches a sub-graph: additional connections can exist anywhere
      *   in the matched sub-graph
      *   EXAMPLE: "Add<*-." will match the Add operator and its first input, any
@@ -112,7 +110,7 @@ public:
      *   EXAMPLE: "(Add#<*-.)+" will match the Add operator and all of its inputs
      *            Note that the anchor is required since we intend to match several
      *            inputs of the same node!
-     * 
+     *
      * - In Aidge, a node output can be connected to multiple other nodes. In
      *   your query, you can allow it or not, with the "~" or "-" modifier.
      *   EXAMPLE: "Conv->ReLU" will match the Conv that are **only** connected
@@ -121,7 +119,7 @@ public:
      *            if they are also connected to other nodes at the same output #0.
      *   When implementing a match & replace recipe, beware that you don't break
      *   branches in the middle of your matching result if you use "~"!
-     * 
+     *
      * - The matching results can be overlapping, meaning that some nodes may be
      *   found in multiple results. Some results may be subsets of other results.
      *   EXAMPLE: assume graph Conv#1->ReLU#1->Conv#2->ReLU#2
@@ -129,11 +127,11 @@ public:
      *            Conv#1->ReLU#1->Conv#2->ReLU#2 and Conv#2->ReLU#2
      *   To avoid this behavior, set the disjoint argument to true. In this case,
      *   only Conv#1->ReLU#1->Conv#2->ReLU#2 will be kept in the example above.
-     * 
+     *
      * - Whitespaces are allowed anywhere in the query
-     * 
+     *
      * QUERY = SEQ | NODE_OR_BLOCK (';' (SEQ | NODE_OR_BLOCK))*
-     * 
+     *
      * @param query The query to search.
-     * @param disjoint If true, only keep the longuest disjoint (non-overlapping) matches.
+     * @param disjoint If true, only keep the longest disjoint (non-overlapping) matches.
      * @return Set of matches, each stored in a MatchingResult struct.
diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp
index 9d1ba6fd1e1df594634bfd93a24663ff178b7ee6..bf14d39af34c2e14d98906a663edf335c30c6f12 100644
--- a/include/aidge/graph/OpArgs.hpp
+++ b/include/aidge/graph/OpArgs.hpp
@@ -34,6 +34,10 @@ public:
     OpArgs(const std::shared_ptr<Node>& node_)
      : mNode(node_) {assert(mNode && "The Node provided should not be a nullptr.");}
 
+    OpArgs(const OpArgs&);
+    OpArgs& operator=(const OpArgs&);
+    ~OpArgs() noexcept;
+
     inline std::shared_ptr<Node> node() const noexcept {
         return mNode;
     }
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 0e709afe9f175443a28947be7f4c3f5b01f5e362..97db476729abc07985b16de62084be5fce603bc9 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -28,13 +28,7 @@ class Add_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Add_Op(const IOIndex_t nbIn)
-        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
-    {
-        if (nbIn == 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-        }
-    }
+    Add_Op(const IOIndex_t nbIn);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -46,9 +40,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Add_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Add_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -72,9 +64,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
-}
+std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 920829473d856b2a4c14fc0859abcd4c3b70277a..b2f4ce92580afddcc7aa3627ea0ef89d4ac3ffee 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -64,9 +64,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::AvgPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<AvgPooling_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
@@ -93,12 +91,9 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
-}
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 08d1f6a88d394e34dd6e351f500429113a52c9fa..7f1f63c68a512c4b6a59a515d6130afe9696a8c2 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -64,9 +64,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::BatchNorm_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<BatchNorm_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -103,11 +101,11 @@ std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
                                        const float epsilon = 1.0e-5F,
                                        const float momentum = 0.1F,
                                        const std::string& name = "");
+}  // namespace Aidge
 
-extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
-extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
-extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
+extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const Aidge::DimSize_t, const float, const float, const std::string&);
+extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const Aidge::DimSize_t, const float, const float, const std::string&);
+extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const Aidge::DimSize_t, const float, const float, const std::string&);
-}  // namespace Aidge
 
 namespace {
 template <>
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 291669b7c57c14a77ffa6b40fa2aefab8d281fc7..fd12f551a2251f3dfe8ea0a0d0528d9dad742e42 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -86,9 +86,8 @@ public:
 };
 
 
-inline std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
-}
+std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "");
+
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index ab14bf527dd9949f3bb2b6157619e58c7c7580ee..46cd3a5a328984bde7e537d984b30e0774a3d259 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -50,40 +50,19 @@ private:
 public:
     Concat_Op() = delete;
 
-    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
-        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ConcatAttr::Axis>(axis)))
-    {
-        if (nbIn == 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-        }
-        mImpl = std::make_shared<Concat_OpImpl>(*this);
-    }
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Concat_Op(const Concat_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Concat_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Concat_OpImpl>(*this);
-        }
-    }
+    Concat_Op(const Concat_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Concat_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Concat_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -100,9 +79,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
-}
+std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "");
 }
 
 namespace {
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index e89c94f968ab89f43e6ef2d95a40a6f557cc41c7..7366472d24b78b58aab589ea2b3ccd045e4a5ea7 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -140,22 +140,13 @@ public:
  * @return std::shared_ptr<Node> A Node containing the operator.
  */
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
+std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   DimSize_t outChannels,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                  bool noBias = false) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
-    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    if (!noBias) {
-        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
-    }
-    return conv;
-}
+                                  bool noBias = false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 1acf240bfcdd256953cd96b92e3622a265aafa0b..63d8e8419b47279c51783db057b5b1a63c7d0884 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -105,21 +105,12 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
+std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                           bool noBias=false) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
-    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    if (!noBias) {
-        addProducer(convDW, 2, {nbChannels}, "b");
-    }
-    return convDW;
-}
+                                           bool noBias=false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 3edb4a28851cffe060886a4660d6b524eb9b814a..b16a5e6733e8846b05e3e491cf5bc7f793d97f1c 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -66,9 +66,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Div(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
-}
-}
+std::shared_ptr<Node> Div(const std::string& name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_DIV_H_ */
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index f615fedeef6fea59d2177cf886e8d910f064f5c2..b6cc8f30c0fff3366cb1d3fea678e4cad8f9cb10 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -35,23 +35,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Erf_Op(const Erf_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Erf_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Erf_Op(const Erf_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Erf_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Erf_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
@@ -63,9 +53,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Erf(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
-}
+std::shared_ptr<Node> Erf(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ERF_H_ */
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 01da37a05414c5994ace767770e7c26fc8cd4646..f1996fbae025838e2e6f6c21c70018c7cc9746f5 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -53,9 +53,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::FC_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<FC_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
@@ -78,15 +76,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
-    addProducer(fc, 1, {outChannels, inChannels}, "w");
-    if (!noBias) {
-        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
-    }
-    return fc;
-}
+std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index caf904e870425c000687ccd95397c92744020eec..aebe3879b94fd13c8226fffe42e513715d8e3e5a 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -67,25 +67,13 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Fold_Op(const Fold_Op<DIM> &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Fold_Op(const Fold_Op<DIM> &op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Fold_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Fold_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -106,15 +94,11 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
+std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
-    return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
-}
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Fold(
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 5f3917e486e2e2188bfd23bd58a13b51d5fc7a59..f2e3b0fe8c063a5eec5e0c2140c3b7eabf3ee68a 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -55,39 +55,19 @@ public:
 
     Gather_Op(std::int8_t axis,
               const std::vector<int64_t>& indices,
-              const std::vector<DimSize_t>& gatheredShape)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
-        mAttributes(std::make_shared<Attributes_>(
-            attr<GatherAttr::Axis>(axis),
-            attr<GatherAttr::Indices>(indices),
-            attr<GatherAttr::GatheredShape>(gatheredShape)))
-    {
-        mImpl = std::make_shared<Gather_OpImpl>(*this);
-    }
+              const std::vector<DimSize_t>& gatheredShape);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Gather_Op(const Gather_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Gather_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Gather_OpImpl>(*this);
-        }
-    }
+    Gather_Op(const Gather_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Gather_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Gather_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -107,9 +87,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
-}
+std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 8196c4268e669001d99f25ed2cead546e1141aa7..41516a39723249b5b5c715a66ce3398dff8e65b1 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -35,43 +35,23 @@ private:
     const std::shared_ptr<DynamicAttributes> mAttributes;
 
 public:
-    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut)
-        : OperatorTensor(type, inputsCategory, nbOut)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut);
 
-    GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
-        : OperatorTensor(type, [nbData, nbParam]() {
-                                std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
-                                inputsCategory.resize(nbData + nbParam, InputCategory::Param);
-                                return inputsCategory;
-                            }(), nbOut),
-          mAttributes(std::make_shared<DynamicAttributes>())
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    GenericOperator_Op(const GenericOperator_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
-    }
+    GenericOperator_Op(const GenericOperator_Op& op);
 
-    ~GenericOperator_Op() = default;
+    ~GenericOperator_Op() noexcept;
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::GenericOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<GenericOperator_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 public:
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -111,10 +91,8 @@ public:
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
-                                             const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
-}
+std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
+                                             const std::string& name = "");
 
 /**
  * @brief Fictive custom operator not associated with any implementation.
@@ -126,10 +104,8 @@ inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std:
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
-                                             const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
-}
+std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
+                                             const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 8bb738e8b57598e4256d3850fc791976e73c834c..734e12344fed4cd25dd41e91dc8cfb18fea4fd45 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -39,18 +39,9 @@ public:
 
   GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
-  GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op)
-      : OperatorTensor(op) {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-  }
+  GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op);
 
-  std::shared_ptr<Operator> clone() const override {
-    return std::make_shared<GlobalAveragePooling_Op>(*this);
-  }
+  std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -64,11 +55,8 @@ public:
   }
 };
 
-inline std::shared_ptr<Node>
-GlobalAveragePooling(const std::string &name = "") {
-  return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(),
-                                name);
-}
+std::shared_ptr<Node> GlobalAveragePooling(const std::string &name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GLOBAL_AVERAGE_POOLING_H_ */
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index e07df59d888993cb33da9c20393d897ab9cf1804..622d6290af55ef5a717c6f5763ade5a2750fb9f0 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -27,8 +27,6 @@
 
 namespace Aidge {
 
-
-
 /**
  * @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes.
  * This Operator has no Implementation, it just forward its input Tensor.
@@ -41,29 +39,20 @@ class Identity_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Identity_Op()
-        : OperatorTensor(Type, {InputCategory::Data}, 1)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    Identity_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Identity_Op(const Identity_Op& op)
-        : OperatorTensor(op)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
-    }
+    Identity_Op(const Identity_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Identity_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Identity_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
 
@@ -75,9 +64,7 @@ public:
      * @return true Input has dimensions.
      * @return false Input has no dimensions or is a nullptr.
      */
-    bool dimsForwarded() const override final {
-        return mInputs[0] ? (mInputs[0]->undefined() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
-    }
+    bool dimsForwarded() const override final;
 
 
     void forward() override final;
@@ -99,9 +86,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Identity(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
-}
+std::shared_ptr<Node> Identity(const std::string& name = "");
+
 }
 
 #endif /* AIDGE_CORE_OPERATOR_IDENTITY_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 3057b99f70fa3693f7e434be29dcd40fb98d4bea..30d171eab3ee54864aae48f445e4d0f04792dd31 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -17,7 +17,6 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -54,31 +53,15 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    LeakyReLU_Op(const LeakyReLU_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    LeakyReLU_Op(const LeakyReLU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::LeakyReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<LeakyReLU_Op>(*this);
-    }
-
+    std::shared_ptr<Operator> clone() const override;
 
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
@@ -91,9 +74,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
-}
+std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
 }
 
 namespace {
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index d4010471c9af853556dbe1d60c8585d12f8fc638..c6a9ec4c8d59800cdbcc3f0229acdbbb436cd732 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -36,23 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Ln_Op(const Ln_Op& op)
-        : OperatorTensor(op)
-    {
-       if (op.mImpl){
-            SET_IMPL_MACRO(Ln_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Ln_Op(const Ln_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Ln_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Ln_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -65,9 +55,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Ln(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
-}
+std::shared_ptr<Node> Ln(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_LN_H_ */
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index be460ee88bd79592e29581f6acd64813ecc39bec..f81fb7bd0a3156fcffccc10fe3d460273f353252 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -36,22 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MatMul_Op(const MatMul_Op& op) : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    MatMul_Op(const MatMul_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MatMul_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<MatMul_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
     /**
      * @brief Compute dimensions for the output Tensor following the same rules as
@@ -77,9 +68,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> MatMul(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
-}
+std::shared_ptr<Node> MatMul(const std::string& name = "");
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 7e2c68681e645133812103a94e4c39ab9d1dc970..3b7473a6a17e8ebf490941068c8245d5847e0299 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -20,7 +20,6 @@
 #include <stdexcept>   // std::runtime_error
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -51,71 +50,25 @@ private:
 public:
     MaxPooling_Op() = delete;
 
-    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+    MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                            bool ceil_mode = false)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<MaxPoolingAttr::StrideDims>(stride_dims),
-            attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-            attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
-        {}
+                            bool ceil_mode = false);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MaxPooling_Op(const MaxPooling_Op<DIM>& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    MaxPooling_Op(const MaxPooling_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MaxPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MaxPooling_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (inputsAssociated()) {
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-            std::function<float(float)> roundingFunction;
-            if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
-                roundingFunction = [](float x) { return std::ceil(x); };
-            } else {
-                roundingFunction = [](float x) { return std::floor(x); };
-            }
-
-            for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            roundingFunction(static_cast<float>(inputDims[dim+2] -
-                                                                    mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
-                                            static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
-            }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-            return true;
-        }
-        return false;
-    }
-
-
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
@@ -130,17 +83,15 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string MaxPooling_Op<DIM>::Type = "MaxPooling";
+extern template class Aidge::MaxPooling_Op<1>;
+extern template class Aidge::MaxPooling_Op<2>;
+extern template class Aidge::MaxPooling_Op<3>;
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           bool ceil_mode=false) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
-}
+                                           bool ceil_mode=false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index bb652e833ad06df37f55d3582afd0e66cc3e97c8..a1d90f06f098eb7fa2fc199b595991702daf488a 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -50,40 +50,20 @@ private:
 public:
     Memorize_Op() = delete;
 
-    Memorize_Op(const std::uint32_t endStep)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
-          mAttributes(std::make_shared<Attributes_>(
-                        attr<MemorizeAttr::ScheduleStep>(0),
-                        attr<MemorizeAttr::ForwardStep>(0),
-                        attr<MemorizeAttr::EndStep>(endStep)))
-    {
-        mOutputs[1] = mOutputs[0];
-    }
+    Memorize_Op(const std::uint32_t endStep);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Memorize_Op(const Memorize_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-        mOutputs[1] = mOutputs[0];
-    }
+    Memorize_Op(const Memorize_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Memorize_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Memorize_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -105,9 +85,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
-}
+std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 744564b4bd591d84b871a6af71c4a54589103485..69f2120d90beb727bd661628c362410066ae3cff 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -50,7 +50,7 @@ public:
     /**
      * Set the node that should be used for the scheduling.
     */
-    void setUpperNode(std::shared_ptr<Node> node) {
+    inline void setUpperNode(std::shared_ptr<Node> node) {
         mUpperNode = node;
     }
 
@@ -58,9 +58,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MetaOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MetaOperator_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     inline const std::shared_ptr<GraphView>& getMicroGraph() const noexcept {
         return mGraph;
@@ -82,17 +80,7 @@ public:
     }
 
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        if (Registrar<MetaOperator_Op>::exists({name, type()})) {
-            // A custom implementation exists for this meta operator
-            mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
-        }
-
-        // The micro-graph should always be set to the right backend, since it
-        // shares input/output tensors.
-        // Input/output tensors backend are updated here.
-        mGraph->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
@@ -118,15 +106,9 @@ public:
 
 };
 
-inline std::shared_ptr<Node> MetaOperator(const char *type,
+std::shared_ptr<Node> MetaOperator(const char *type,
                                   const std::shared_ptr<GraphView>& graph,
-                                  const std::string& name = "")
-{
-    auto op = std::make_shared<MetaOperator_Op>(type, graph);
-    auto node = std::make_shared<Node>(op, name);
-    op->setUpperNode(node);
-    return node;
-}
+                                  const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* MetaOperator_H_ */
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 51681629cbae215fd529b6e7bb568d07264dd63e..bc3348377525cdd2e5b2c030c8fc6b7cb8177e7b 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -33,43 +33,25 @@ namespace Aidge {
 
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
+extern std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                   DimSize_t out_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    // Construct micro-graph
-    auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
-
-    auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    if (!no_bias) {
-        addProducer(metaOp, 2, {out_channels}, "b");
-    }
-    return metaOp;
-}
+                                  bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
+extern std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-{
-    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
-
-    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
-}
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> PaddedConv(
+extern std::shared_ptr<Node> PaddedConv(
     DimSize_t in_channels,
     DimSize_t out_channels,
     DimSize_t const (&kernel_dims)[DIM],
@@ -77,46 +59,25 @@ inline std::shared_ptr<Node> PaddedConv(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false)
-{
-    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
-}
+    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
+std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    // Construct micro-graph
-    auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
-
-    auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
-    if (!no_bias) {
-        addProducer(metaOp, 2, {nb_channels}, "b");
-    }
-    return metaOp;
-}
+                                  bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
+std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-{
-    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
-
-    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
-}
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -127,10 +88,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false)
-{
-    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
-}
+    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index cf5a3f188424fc52849eab580cce624ff714c729..9908911419d8ce027cdb18c4abf45a5c71be67b1 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -35,32 +35,19 @@ class Move_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Move_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {
-        mImpl = std::make_shared<Move_OpImpl>(*this);
-    }
+    Move_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Move_Op(const Move_Op& op)
-        : OperatorTensor(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
-        }
-        else {
-            mImpl = std::make_shared<Move_OpImpl>(*this);
-        }
-    }
+    Move_Op(const Move_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Move_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Move_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
@@ -72,9 +59,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Move(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
-}
-}
+std::shared_ptr<Node> Move(const std::string& name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index e61393b28fc45bf46487ac2277753dec1b297b81..35a4b7e061bba76f1e63343e9230eddddfde11ac 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -39,23 +39,13 @@ public:
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Mul_Op(const Mul_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Mul_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Mul_Op(const Mul_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Mul_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Mul_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -69,9 +59,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Mul(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
-}
+std::shared_ptr<Node> Mul(const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 215fafb7fee10587dec38e77685d705f7c1bb980..bdb5330a6fd02693f4d75ccba06ce613d9a0dff1 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -17,10 +17,8 @@
 #include <string>
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
@@ -70,34 +68,12 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pad_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pad_Op<DIM>>(*this);
-    }
-
+    std::shared_ptr<Operator> clone() const override;
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (inputsAssociated()) {
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
-            for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
-                                    + inputDims[dim+2]
-                                    + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
-            }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-            return true;
-        }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-        return false;
-    }
-
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
@@ -113,14 +89,10 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                                           const std::string& name = "",
-                                           const PadBorderType &borderType = PadBorderType::Constant,
-                                           double borderValue = 0.0)
-{
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
-    return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
-}
+std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
+                        const std::string& name = "",
+                        const PadBorderType &borderType = PadBorderType::Constant,
+                        double borderValue = 0.0);
 
 // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index fb3b32eeacf2e199df88b6bd0256cf6cbdaa1065..41ab3c537eacc88920419cb5e0deecc4720796ba 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -44,36 +44,19 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    Pop_Op()
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
-    {
-        mImpl = std::make_shared<Pop_OpImpl>(*this);
-    }
+    Pop_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Pop_Op(const Pop_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Pop_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Pop_OpImpl>(*this);
-        }
-    }
+    Pop_Op(const Pop_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pop_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pop_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -92,9 +75,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Pop(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
-}
+std::shared_ptr<Node> Pop(const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index ee5c01c2121d68a7988dc686c4dbb4bbf7331c84..eaf4297fd8b3751463a20ae219af5c25ecd789ae 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -66,9 +66,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Pow(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
-}
+std::shared_ptr<Node> Pow(const std::string& name = "");
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1647c563d38ab4931cc3a0c2a4281555215f990e..257a6965be4c08735f23ae575ffe104bb706593a 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -45,14 +45,7 @@ public:
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
-                bool constant = false)
-        : OperatorTensor(Type, {}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ProdAttr::Constant>(constant)))
-    {
-        mOutputs[0]->resize(dims);
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+                bool constant = false);
 
     /**
      * @brief Construct a new Producer_Op object from a Tensor.
@@ -82,15 +75,13 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Producer_Op(const Producer_Op&)
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Producer_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
+    inline bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
     inline bool dimsForwarded() const noexcept override final { return true; }
 
@@ -115,19 +106,11 @@ public:
         // fmt::print("Basic Producer backward() function.\n");
     }
 
-    void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const override {
-        if (mAttributes->template getAttr<ProdAttr::Constant>()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
-        }
-        OperatorTensor::setOutput(outputIdx, data);
-    }
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false) {
-  static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
-  return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
-}
+std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
@@ -135,20 +118,13 @@ inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::s
   return Producer(to_array(dims), name, constant);
 }
 
-inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false) {
-  return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
-}
+std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
-    assert(inputIdx != gk_IODefaultIndex);
-    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
-    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
-    auto prod = Producer(dims, prodName);
-    prod->addChild(otherNode, 0, inputIdx);
-    otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
-    return prod;
-}
+std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode,
+            const IOIndex_t inputIdx,
+            const std::array<DimSize_t, DIM>& dims,
+            const std::string& extension);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 40b5d581d53521e6086d24c5ecc53f725dd9f252..cc714c4619a0f8eee7af03993700fed7489a6c0e 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -36,23 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ReLU_Op(const ReLU_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ReLU_Op(const ReLU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReLU_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -65,9 +55,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
-}
+std::shared_ptr<Node> ReLU(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 000607c60e4e3c85671e70a941bd11f3427333dd..07beb0a39a88254f0aecdda35cd63f5d338af532 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -45,35 +45,19 @@ private:
 public:
     ReduceMean_Op() = delete;
 
-    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ReduceMeanAttr::Axes>(axes),
-            attr<ReduceMeanAttr::KeepDims>(keep_dims)))
-    {}
+    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ReduceMean_Op(const ReduceMean_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    ReduceMean_Op(const ReduceMean_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReduceMean_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReduceMean_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -101,27 +85,9 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
  */
-inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
+std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
                                         DimSize_t keep_dims=1,
-                                        const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
-    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
-
-}
-
-// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
-// template <DimSize_t DIM>
-// inline std::shared_ptr<Node> ReduceMean(
-//     std::int32_t const (&axes)[DIM],
-//     DimSize_t keep_dims = 1,
-//     const std::string& name = "") {
-//     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
-//     return ReduceMean(to_array(axes), keep_dims, name);
-// }
-
-// template <DimIdx_t DIM>
-// const std::string ReduceMean_Op::Type = "ReduceMean";
+                                        const std::string& name = "");
 
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 29a08c76c248018fff87a5f765a0b62cbd23b6b7..5bd9b3e8d56c106803bf65dc7bf595da85558a1a 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -47,38 +47,19 @@ private:
 public:
     Reshape_Op() = delete;
 
-    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ReshapeAttr::Shape>(shape),
-            attr<ReshapeAttr::AllowZero>(allowzero)))
-    {
-        mImpl = std::make_shared<Reshape_OpImpl>(*this);
-    }
+    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Reshape_Op(const Reshape_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Reshape_OpImpl>(*this);
-        }
-    }
+    Reshape_Op(const Reshape_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Reshape_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Reshape_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -97,12 +78,9 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
-                                     bool allowzero = false,
-                                   	 const std::string &name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
-}
+std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
+                            bool allowzero = false,
+                            const std::string &name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 565affc57ae8e7b1838466733b0f5d8fa8e1a6d6..622a1ff1b191aad9f3f8045380be522d32cf2845 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -30,38 +30,20 @@ class Resize_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Resize_Op()
-        : OperatorTensor(Type,
-            {InputCategory::Data,
-                InputCategory::OptionalData,
-                InputCategory::OptionalData,
-                InputCategory::OptionalData},
-            1) {}
+    Resize_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-
-    Resize_Op(const Resize_Op& op)
-        : OperatorTensor(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Resize_Op(const Resize_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Resize_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Resize_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -77,10 +59,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Resize(const std::string &name = "") {
-
-    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
-}
+std::shared_ptr<Node> Resize(const std::string &name = "");
 
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 0683a26f6e9d8ef462c2af4693f372b43c33a144..311dc0202d866253bb98285e77e6d6ea8b345e0f 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -42,36 +42,19 @@ private:
 public:
     Scaling_Op() = delete;
 
-    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ScalingAttr::ScalingFactor>(scalingFactor),
-            attr<ScalingAttr::QuantizedNbBits>(nbBits),
-            attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
-    {}
+    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Scaling_Op(const Scaling_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Scaling_Op(const Scaling_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Scaling_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Scaling_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -93,13 +76,10 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
 }
 */
-inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
+std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
                                      std::size_t quantizedNbBits=8,
                                      bool isOutputUnsigned=true,
-                                     const std::string& name = "")
-{
-    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
-}
+                                     const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 94f237726e79d8fe7824ff2c9b2f7640bbfc716f..d76a9fd069ebbda81e446e6f3486ff0ff66755bb 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -49,38 +49,19 @@ private:
 public:
     Shape_Op() = delete;
 
-    Shape_Op(const std::int64_t start, const std::int64_t end)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ShapeAttr::Start>(start),
-            attr<ShapeAttr::End>(end)))
-    {
-        mImpl = std::make_shared<Shape_OpImpl>(*this);
-    }
+    Shape_Op(const std::int64_t start, const std::int64_t end);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Shape_Op(const Shape_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Shape_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Shape_OpImpl>(*this);
-        }
-    }
+    Shape_Op(const Shape_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Shape_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Shape_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -98,9 +79,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
-}
+std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 879edcac6a7ed9a78a2db8d82994071a6cf09635..4d3000750c2224aaea278beca4c8124e0845042e 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -32,29 +32,19 @@ class ShiftGELU_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    ShiftGELU_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ShiftGELU_Op(const ShiftGELU_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ShiftGELU_Op(const ShiftGELU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ShiftGELU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ShiftGELU_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -67,9 +57,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ShiftGELU(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
-}
+std::shared_ptr<Node> ShiftGELU(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SHIFTGELU_H_ */
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index f171130213b2e51ca8fc9905d93944198f849ce7..d75e6559f5f4df9a1010d65ba97529e6165ae42f 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -32,29 +32,19 @@ class ShiftMax_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    ShiftMax_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ShiftMax_Op(const ShiftMax_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ShiftMax_Op(const ShiftMax_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ShiftMax_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ShiftMax_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -67,9 +57,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ShiftMax(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
-}
+std::shared_ptr<Node> ShiftMax(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SHIFTMAX_H_ */
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index ae82d4a3a2d29755bba22b9a4194284310ac4f84..b3204240cd130251fe8abe7d50bdad9b92b7558c 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -30,30 +30,11 @@ class Sigmoid_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    Sigmoid_Op();
 
-    /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
-     */
-    Sigmoid_Op(const Sigmoid_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sigmoid_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sigmoid_Op>(*this);
-    }
+    Sigmoid_Op(const Sigmoid_Op& op);
 
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -65,9 +46,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sigmoid(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
-}
+std::shared_ptr<Node> Sigmoid(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 04a67fe98f7682737bff6df18f28d568ee33e093..241e165a0e441ccb856431225ce1d6fd170a25f8 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -45,14 +45,10 @@ private:
 public:
     Slice_Op() = delete;
 
-    Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes, const std::vector<std::int64_t>& steps)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<SliceAttr::Starts>(starts),
-            attr<SliceAttr::Ends>(ends),
-            attr<SliceAttr::Axes>(axes),
-            attr<SliceAttr::Steps>(steps)))
-    {}
+    Slice_Op(const std::vector<std::int64_t>& starts,
+            const std::vector<std::int64_t>& ends,
+            const std::vector<std::int8_t>& axes,
+            const std::vector<std::int64_t>& steps);
 
 
     /**
@@ -60,24 +56,14 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Slice_Op(const Slice_Op &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Slice_Op, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Slice_Op(const Slice_Op &op);
 
 public:
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Slice_Op
      */
-    std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = true) override final;
@@ -104,13 +90,11 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
+std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
                                    const std::vector<std::int64_t>& ends = {},
                                    const std::vector<std::int8_t>& axes = {},
                                    const std::vector<std::int64_t>& steps = {},
-                                   const std::string &name = "") {
-    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
-}
+                                   const std::string &name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 0b7a8e57193439872c6fcc2699b9f5e55c643961..c221a67e31fc6de1bcb2c727854c8ebee2986ee4 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -42,34 +42,19 @@ private:
 public:
     Softmax_Op() = delete;
 
-    Softmax_Op(std::int32_t axis)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-                attr<SoftmaxAttr::Axis>(axis)))
-    {}
+    Softmax_Op(std::int32_t axis);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Softmax_Op(const Softmax_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    Softmax_Op(const Softmax_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Softmax_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Softmax_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -85,9 +70,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
-}
+std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 7bdec1579c8a8f46640de5caf42c01568d208059..661f9e32d47c7fb7e0c111805a50c6fcc131cffe 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -47,14 +47,7 @@ private:
 public:
     Split_Op() = delete;
 
-    Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<SplitAttr::Axis>(axis),
-            attr<SplitAttr::Split>(split)))
-    {
-        mImpl = std::make_shared<Split_OpImpl>(*this);
-    }
+    Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split);
 
 
     /**
@@ -62,23 +55,14 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Split_Op(const Split_Op &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Split_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Split_OpImpl>(*this);
-        }
-    }
+    Split_Op(const Split_Op &op);
+
 public:
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Split_Op
      */
-    std::shared_ptr<Operator> clone() const override { return std::make_shared<Split_Op>(*this); }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -103,12 +87,10 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Split(DimSize_t nbOutput,
+std::shared_ptr<Node> Split(DimSize_t nbOutput,
                                    std::int8_t axis = 0,
                                    const std::vector<DimSize_t>& split = {},
-                                   const std::string &name = "") {
-    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
-}
+                                   const std::string &name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 05b20286bc3f576d4e43fbece26ae270b3e583e6..ce4aaafc92d1f7d601946c02d4eb025eb735a3f9 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -14,8 +14,8 @@
 
 #include <memory>
 #include <vector>
+#include <string>
 
-#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -24,12 +24,9 @@
 namespace Aidge {
 
 class Sqrt_Op : public OperatorTensor,
-    public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
+                public Registrable<Sqrt_Op,
+                                std::string,
+                                std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     static const std::string Type;
 
@@ -39,23 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Sqrt_Op(const Sqrt_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    Sqrt_Op(const Sqrt_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sqrt_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sqrt_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -67,9 +54,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sqrt(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
-}
+std::shared_ptr<Node> Sqrt(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SQRT_H_ */
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index ba5a021c30f13bbc2ae73c90078548c5b677a3a5..bb29ba67851bce8eed46ab1d4df3cf7a8ce91a1a 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -26,37 +26,23 @@ namespace Aidge {
 
 class Sub_Op : public OperatorTensor,
     public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static const std::string Type;
 
+public:
     Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Sub_Op(const Sub_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sub_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Sub_Op(const Sub_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sub_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sub_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -71,9 +57,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sub(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
-}
+std::shared_ptr<Node> Sub(const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index b5f183a90aeeb4ef424c318e8942a818b568b44a..fd05bf7c434ec2547995800f47380c53585ca6d7 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -28,29 +28,19 @@ class Tanh_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    Tanh_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Tanh_Op(const Tanh_Op& op)
-        : OperatorTensor(op)
-    {
-       if (op.mImpl){
-            SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Tanh_Op(const Tanh_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Tanh_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Tanh_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -63,9 +53,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Tanh(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
-}
+std::shared_ptr<Node> Tanh(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_TANH_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index efd9e1792d530f45754809913a7c648d82c7985e..375d6e098324516b750f8054f9214390373737e2 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -50,37 +50,19 @@ private:
 public:
     Transpose_Op() = delete;
 
-    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
-    {
-        mImpl = std::make_shared<TransposeImpl>(*this);
-    }
+    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Transpose_Op(const Transpose_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<TransposeImpl>(*this);
-        }
-    }
+    Transpose_Op(const Transpose_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Transpose_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Transpose_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -97,10 +79,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
-                                           const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
-}
+std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
+                                           const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 58cbcd2d756ad44ef2ec6a38d46909a114b187c2..3fda7c21405ef023f4324089e60be0330b5f34b6 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -57,42 +57,22 @@ private:
 public:
     Unfold_Op() = delete;
 
-    constexpr Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
-                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<UnfoldAttr::StrideDims>(strideDims),
-            attr<UnfoldAttr::DilationDims>(dilationDims),
-            attr<UnfoldAttr::KernelDims>(kernelDims)))
-    {
-        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);  
-    }
+    Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
+            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Unfold_Op(const Unfold_Op<DIM> &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);  
-        }
-    }
+    Unfold_Op(const Unfold_Op<DIM> &op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Unfold_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Unfold_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -112,14 +92,10 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
+std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
-    return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
-}
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Unfold(
diff --git a/src/graph/Matching.cpp b/src/graph/Matching.cpp
index a840b6ab552d71990b796d741d3ca56b07c7c0be..b93ac16a9384d9b6ec8b62124136cb5085268d58 100644
--- a/src/graph/Matching.cpp
+++ b/src/graph/Matching.cpp
@@ -2,6 +2,33 @@
 
 #include <fmt/color.h>
 
+Aidge::SinglePassGraphMatching::Context::Context() = default;
+Aidge::SinglePassGraphMatching::Context::Context(const Context& other) = default;
+Aidge::SinglePassGraphMatching::Context& Aidge::SinglePassGraphMatching::Context::operator=(const Context& other) = default;
+Aidge::SinglePassGraphMatching::Context::~Context() noexcept = default;
+
+////////////////////////////////////////////////////////////
+
+Aidge::SinglePassGraphMatching::MatchingResult::MatchingResult() : graph(std::make_shared<GraphView>()), startNode(nullptr) {}
+Aidge::SinglePassGraphMatching::MatchingResult::MatchingResult(const Aidge::SinglePassGraphMatching::MatchingResult& other) {
+    graph = std::make_shared<GraphView>(*(other.graph.get()));
+    anchors = other.anchors;
+    startNode = other.startNode;
+}
+Aidge::SinglePassGraphMatching::MatchingResult& Aidge::SinglePassGraphMatching::MatchingResult::operator=(const Aidge::SinglePassGraphMatching::MatchingResult& other) {
+    graph = std::make_shared<GraphView>(*(other.graph.get()));
+    anchors = other.anchors;
+    startNode = other.startNode;
+    return *this;
+}
+Aidge::SinglePassGraphMatching::MatchingResult::~MatchingResult() noexcept = default;
+
+//////////////////////////////////////////////////////////
+
+Aidge::SinglePassGraphMatching::SinglePassGraphMatching(const Aidge::SinglePassGraphMatching& other) = default;
+Aidge::SinglePassGraphMatching& Aidge::SinglePassGraphMatching::operator=(const Aidge::SinglePassGraphMatching& other) = default;
+Aidge::SinglePassGraphMatching::~SinglePassGraphMatching() noexcept = default;
+
 std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphMatching::match(const std::string& query, bool disjoint) {
     Context ctx;
     ctx.query = query;
@@ -104,7 +131,7 @@ bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<Mat
         newCtx.query.erase(0, 1);
 
         removeWhiteSpace(newCtx.query);
-        const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return !isdigit(c); });
         if (endQuantity != newCtx.query.begin()) {
             matchQuantity = std::stoi(newCtx.query.substr(0, endQuantity - newCtx.query.begin()));
@@ -401,7 +428,7 @@ bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingRe
     // optional first IO_INDEX
     int firstIdx = 0;
     bool foundFirst = false;
-    const auto endOutputIdx = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+    const auto endOutputIdx = std::find_if(newCtx.query.begin(), newCtx.query.end(),
         [](char c) { return !isdigit(c); });
     if (endOutputIdx != newCtx.query.begin()) {
         firstIdx = std::stoi(newCtx.query.substr(0, endOutputIdx - newCtx.query.begin()));
@@ -421,7 +448,7 @@ bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingRe
         auto query = newCtx.query;
         query.erase(0, 1); // drop '-'
 
-        const auto endInputIdx = std::find_if(query.begin(), query.end(), 
+        const auto endInputIdx = std::find_if(query.begin(), query.end(),
             [](char c) { return !isdigit(c); });
         if (endInputIdx != query.begin()) {
             secondIdx = std::stoi(query.substr(0, endInputIdx - query.begin()));
@@ -500,7 +527,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     }
     else {
         // TYPE
-        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return (!isalnum(c) && c != '_'); });
 
         if (endIdentifier == newCtx.query.begin()) {
@@ -519,7 +546,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
         newCtx.query.erase(0, 1); // drop '#'
 
         // ANCHOR
-        const auto endAnchor = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endAnchor = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return (!isalnum(c) && c != '_'); });
         anchor = "#" + newCtx.query.substr(0, endAnchor - newCtx.query.begin());
         newCtx.query = newCtx.query.substr(endAnchor - newCtx.query.begin());
@@ -532,7 +559,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
         newCtx.query.erase(0, 1);
 
         // LAMBDA
-        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return (!isalnum(c) && c != '_'); });
 
         if (endIdentifier == newCtx.query.begin()) {
diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp
index e1a378c3db0d79d7816e9882f790540cdc26cd88..cffd14c35a0fe11055198236eba6e344c0ff782c 100644
--- a/src/graph/OpArgs.cpp
+++ b/src/graph/OpArgs.cpp
@@ -13,12 +13,15 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 
+Aidge::OpArgs::OpArgs(const OpArgs&) = default;
+Aidge::OpArgs& Aidge::OpArgs::operator=(const OpArgs&) = default;
+Aidge::OpArgs::~OpArgs() noexcept = default;
 
 std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs) {
     std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
     for (const OpArgs& elt : inputs) {
         if(elt.node() != nullptr) {
-            // Connect the first output (ordered) of each output node (ordered) 
+            // Connect the first output (ordered) of each output node (ordered)
             // to the next available input of the input node.
             AIDGE_ASSERT(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size(),
                 "Sequential(): not enough free data inputs ({}) for input node {} (of type {}) to connect to all previous output nodes ({})",
@@ -33,7 +36,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs)
             gv->add(elt.node());
         }
         else {
-            // For each input node, connect the first output (ordered) of each 
+            // For each input node, connect the first output (ordered) of each
             // output node (ordered) to the next available input
             std::set<NodePtr> connectedInputs;
             for (const auto& node_in : elt.view()->getOrderedInputs()) {
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 57ece07152613b831675cdecd6526d4ab26af5cb..f9dc3335a3b62e87edf33d25c5a516a63c4129a0 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -22,6 +22,14 @@
 
 const std::string Aidge::Add_Op::Type = "Add";
 
+Aidge::Add_Op::Add_Op(const IOIndex_t nbIn)
+    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
+{
+    if (nbIn == 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
+    }
+}
+
 Aidge::Add_Op::Add_Op(const Add_Op& op)
     : OperatorTensor(op)
 {
@@ -32,6 +40,10 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::Add_Op::clone() const {
+    return std::make_shared<Add_Op>(*this);
+}
+
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
@@ -71,4 +83,8 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(Add_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
 }
\ No newline at end of file
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 53ffb93269e79c0ba940f1fb0d3d94cb494ad8ce..db06d8486ed3b6f3dc0dd62615dcc18ed09b3d9e 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -26,6 +26,7 @@
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
 
+
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     : OperatorTensor(op),
@@ -38,6 +39,11 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const {
+    return std::make_shared<AvgPooling_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -110,4 +116,15 @@ void Aidge::AvgPooling_Op<DIM>::setBackend(const std::string &name, Aidge::Devic
 template class Aidge::AvgPooling_Op<1>;
 template class Aidge::AvgPooling_Op<2>;
 template class Aidge::AvgPooling_Op<3>;
-template class Aidge::AvgPooling_Op<4>;
\ No newline at end of file
+template class Aidge::AvgPooling_Op<4>;
+
+////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims) {
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", AvgPooling_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
+}
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 98e5c2da20fc35e18d4fd69a79cf1d87ec9d60ca..a81cfc132773134889a5164762091229759b4f38 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -38,6 +38,11 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::BatchNorm_Op<DIM>::clone() const {
+    return std::make_shared<BatchNorm_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -95,7 +100,7 @@ template class Aidge::BatchNorm_Op<3>;
 template class Aidge::BatchNorm_Op<4>;
 
 template <Aidge::DimSize_t DIM>
-inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const DimSize_t nbFeatures,
+inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFeatures,
                                        const float epsilon,
                                        const float momentum,
                                        const std::string& name) {
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 8df153a67d2214e4435d9fa0aac6e74d53e11b12..b6164a77cb47e0b9127fa4b02255ed0991805fe7 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -46,3 +46,7 @@ void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::shared_ptr<Aidge::Node> Aidge::Cast(const Aidge::DataType targetType, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
+}
\ No newline at end of file
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 4649a954a095d239dbe7de7bcbebf1025a3b22c6..c78afa8665322a9cbca42a3326d527c1ebd949d4 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -18,6 +18,35 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+const std::string Aidge::Concat_Op::Type = "Concat";
+
+Aidge::Concat_Op::Concat_Op(const Aidge::IOIndex_t nbIn, const std::int32_t axis)
+    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ConcatAttr::Axis>(axis)))
+{
+    if (nbIn == 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Concat operator should have at least one input.");
+    }
+    mImpl = std::make_shared<Concat_OpImpl>(*this);
+}
+
+Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Concat_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Concat_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const {
+    return std::make_shared<Concat_Op>(*this);
+}
+
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
     const DimSize_t axis = op.axis();
@@ -56,7 +85,6 @@ void Aidge::Concat_OpImpl::forward() {
     }
 }
 
-const std::string Aidge::Concat_Op::Type = "Concat";
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!inputsAssociated()) {
@@ -105,3 +133,9 @@ void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Concat(const Aidge::IOIndex_t nbIn, const std::int32_t axis, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
+}
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index a33af78779971e77da4f4e910b89b9263a1af5d6..5f9ecff9494b309e0a50fa3fc457d899f7cd2318 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -158,4 +158,26 @@ void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
 }
 
 template class Aidge::Conv_Op<1>;
-template class Aidge::Conv_Op<2>;
\ No newline at end of file
+template class Aidge::Conv_Op<2>;
+
+/////////////////////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Conv(Aidge::DimSize_t inChannels,
+                                  Aidge::DimSize_t outChannels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims,
+                                  bool noBias) {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
+    return conv;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Conv<2>(Aidge::DimSize_t, Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 342fd86195d5c2e85a63d990c4ebbb75e7f50a6b..109a122d79354d6cd32d60fd7838756d46ff0215 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -157,4 +157,25 @@ void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::De
 }
 
 template class Aidge::ConvDepthWise_Op<1>;
-template class Aidge::ConvDepthWise_Op<2>;
\ No newline at end of file
+template class Aidge::ConvDepthWise_Op<2>;
+
+////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise(const Aidge::DimSize_t nbChannels,
+                                           const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                           const std::array<Aidge::DimSize_t, DIM> &dilationDims,
+                                           bool noBias) {
+    // FIXME: properly handle default w&b initialization in every cases
+    AIDGE_ASSERT(DIM<=MaxDim,"Too many kernel dimensions required by {}, not supported", ConvDepthWise_Op<DIM>::Type);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
+    if (!noBias) {
+        addProducer(convDW, 2, {nbChannels}, "b");
+    }
+    return convDW;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise<2>(Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 387a9516077a937cca5c20ad091547b7f1c5be6f..2140b17a3abee329effaae63fada187fc522495f 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -56,3 +56,9 @@ void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Div_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+///////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Div(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index 81c87f10b10210c2af203a05df53e3330bb33b72..ed1f79f79a3011f72da1a1804d84960595f880c0 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -19,7 +19,27 @@
 
 const std::string Aidge::Erf_Op::Type = "Erf";
 
+Aidge::Erf_Op::Erf_Op(const Aidge::Erf_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Erf_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Erf_Op::clone() const {
+    return std::make_shared<Erf_Op>(*this);
+}
+
 void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Erf_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Erf(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 44d499bc7e125c757f802e086c22e1e6c72e9216..577a1842d76d3f58763ccd598205935e2c6d6eb4 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -23,6 +23,10 @@
 
 const std::string Aidge::FC_Op::Type = "FC";
 
+std::shared_ptr<Aidge::Operator> Aidge::FC_Op::clone() const {
+    return std::make_shared<FC_Op>(*this);
+}
+
 void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
     AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
@@ -86,3 +90,16 @@ void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device
         getInput(2)->setBackend(name, device);
     }
 }
+
+std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
+                                       const Aidge::DimSize_t outChannels,
+                                       bool noBias,
+                                       const std::string& name) {
+    // FIXME: properly handle default w&b initialization in every cases
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
+    addProducer(fc, 1, {outChannels, inChannels}, "w");
+    if (!noBias) {
+        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
+    }
+    return fc;
+}
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index abe73e54ede0611cb14e24332302c35afa91c2a9..79ea0cea196e73c36ff5002c812f1dd19a3572b3 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -26,6 +26,24 @@
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Fold_Op<DIM>::Type = "Fold";
 
+template <Aidge::DimIdx_t DIM>
+Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Fold_Op<DIM>::clone() const {
+    return std::make_shared<Fold_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -64,4 +82,19 @@ void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
-template class Aidge::Fold_Op<2>;
\ No newline at end of file
+template class Aidge::Fold_Op<2>;
+
+///////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM> &outputDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims) {
+    // FIXME: properly handle default w&b initialization in every cases
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", Fold_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index cd3c4357434ec4b49b6ea05e0d2633adfee7bfd0..00d471f6dc3e1417e4b343002b12a26260030d30 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -20,6 +20,36 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+const std::string Aidge::Gather_Op::Type = "Gather";
+
+
+Aidge::Gather_Op::Gather_Op(std::int8_t axis,
+              const std::vector<int64_t>& indices,
+              const std::vector<Aidge::DimSize_t>& gatheredShape)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<GatherAttr::Axis>(axis),
+        attr<GatherAttr::Indices>(indices),
+        attr<GatherAttr::GatheredShape>(gatheredShape)))
+{
+    mImpl = std::make_shared<Gather_OpImpl>(*this);
+}
+
+Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op)
+    : OperatorTensor(op), mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Gather_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Gather_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const {
+    return std::make_shared<Gather_Op>(*this);
+}
+
 void Aidge::Gather_OpImpl::forward() {
     const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
 
@@ -48,8 +78,6 @@ void Aidge::Gather_OpImpl::forward() {
     }
 }
 
-const std::string Aidge::Gather_Op::Type = "Gather";
-
 bool Aidge::Gather_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
@@ -113,3 +141,12 @@ void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Gather(std::int8_t axis,
+                                        const std::vector<int64_t>& indices,
+                                        const std::vector<Aidge::DimSize_t>& gatheredShape,
+                                        const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
+}
\ No newline at end of file
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index d49e1f0838f623bca1546e54ea4f4e470d70e1c5..e8c66085de5bc7c808b7f2307a9a82b22a426bb2 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -18,6 +18,42 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
+                                            const std::vector<Aidge::InputCategory>& inputsCategory,
+                                            Aidge::IOIndex_t nbOut)
+    : OperatorTensor(type, inputsCategory, nbOut)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
+                                            Aidge::IOIndex_t nbData,
+                                            Aidge::IOIndex_t nbParam,
+                                            Aidge::IOIndex_t nbOut)
+    : OperatorTensor(type, [nbData, nbParam]() {
+                            std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
+                            inputsCategory.resize(nbData + nbParam, InputCategory::Param);
+                            return inputsCategory;
+                        }(), nbOut),
+        mAttributes(std::make_shared<DynamicAttributes>())
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
+{
+    mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
+}
+
+Aidge::GenericOperator_Op::~GenericOperator_Op() noexcept = default;
+
+std::shared_ptr<Aidge::Operator> Aidge::GenericOperator_Op::clone() const {
+    return std::make_shared<GenericOperator_Op>(*this);
+}
+
 const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity
     = [](const std::vector<std::vector<std::size_t>>& inputsDims) { return inputsDims; };
 
@@ -55,3 +91,20 @@ void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t
         mOutputs[i]->setBackend(name, device);
     }
 }
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                            const std::vector<Aidge::InputCategory>& inputCategory,
+                                            Aidge::IOIndex_t nbOut,
+                                            const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
+}
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                                Aidge::IOIndex_t nbData,
+                                                Aidge::IOIndex_t nbParam,
+                                                Aidge::IOIndex_t nbOut,
+                                                const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
+}
\ No newline at end of file
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index 1632c8a7677c884194494269e1a8cd93e7ef7822..e7b2bdffb979fe377de5c7bd1e86147874e7d043 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -21,6 +21,20 @@
 
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
+Aidge::GlobalAveragePooling_Op::GlobalAveragePooling_Op(const Aidge::GlobalAveragePooling_Op &op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::GlobalAveragePooling_Op::clone() const {
+    return std::make_shared<GlobalAveragePooling_Op>(*this);
+}
+
 bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
@@ -41,4 +55,10 @@ bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::GlobalAveragePooling(const std::string &name) {
+  return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(), name);
+}
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index 2b8107bfc77ef70b33a97032d350a42ec5f3f466..2f60eb2fd9c5d43c60ae7ee3af49c3b2e407a1fe 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -15,8 +15,35 @@
 
 const std::string Aidge::Identity_Op::Type = "Identity";
 
+Aidge::Identity_Op::Identity_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op)
+    : OperatorTensor(op)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const {
+    return std::make_shared<Identity_Op>(*this);
+}
+
+bool Aidge::Identity_Op::dimsForwarded() const {
+    const auto& input0 = getInput(0);
+    return input0 ? (input0->undefined() ? false :
+                            input0->dims() == getOutput(0)->dims()) :
+                                false;
+}
+
 void Aidge::Identity_Op::forward() {
     // Perform a shallow copy
     *(mOutputs[0]) = *(mInputs[0]);
     runHooks();
 }
+
+std::shared_ptr<Aidge::Node> Aidge::Identity(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
+}
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index 32e050ee1595cf83b5cd0ffbfeba6153dc2243af..9def23758d5f779f14dec2ee19199fe0f48c4980 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -9,8 +9,37 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/LeakyReLU.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/LeakyReLU.hpp"
+#include "aidge/data/Tensor.hpp"
+
+const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
+
+Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::LeakyReLU_Op::clone() const {
+    return std::make_shared<LeakyReLU_Op>(*this);
+}
+
+void Aidge::LeakyReLU_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+/////////////////////////////////////
 
-const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
\ No newline at end of file
+std::shared_ptr<Aidge::Node> Aidge::LeakyReLU(float negativeSlope, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
+}
\ No newline at end of file
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
index 7e9f9ad01186f53a0f89657acb72f6a544223068..61ff0dc524bdc025dddd4be1b45bf507fe4f718e 100755
--- a/src/operator/Ln.cpp
+++ b/src/operator/Ln.cpp
@@ -19,7 +19,27 @@
 
 const std::string Aidge::Ln_Op::Type = "Ln";
 
+Aidge::Ln_Op::Ln_Op(const Aidge::Ln_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Ln_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Ln_Op::clone() const {
+    return std::make_shared<Ln_Op>(*this);
+}
+
 void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mImpl = Registrar<Ln_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Ln(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 207229b93b0ae362f42c1bae6fb1455b5a2b9d3d..c95fe544cbd29f715e8bd7caae58193deaac6331 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -20,6 +20,20 @@
 
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
+Aidge::MatMul_Op::MatMul_Op(const Aidge::MatMul_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::MatMul_Op::clone() const {
+    return std::make_shared<MatMul_Op>(*this);
+}
+
 bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated(false)) {
         if (getInput(0)->empty() && getInput(1)->empty()) {
@@ -82,3 +96,9 @@ void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     SET_IMPL_MACRO(MatMul_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::MatMul(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..85f2dd930f2d35b9d9e9ea597b588637a56cb952
--- /dev/null
+++ b/src/operator/MaxPooling.cpp
@@ -0,0 +1,104 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MaxPooling.hpp"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                            const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                            bool ceil_mode)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+    attr<MaxPoolingAttr::StrideDims>(stride_dims),
+    attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+    attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
+{}
+
+template <Aidge::DimIdx_t DIM>
+Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::MaxPooling_Op<DIM>::clone() const {
+    return std::make_shared<MaxPooling_Op<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
+        std::function<float(float)> roundingFunction;
+        if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
+            roundingFunction = [](float x) { return std::ceil(x); };
+        } else {
+            roundingFunction = [](float x) { return std::floor(x); };
+        }
+
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                        roundingFunction(static_cast<float>(inputDims[dim+2] -
+                                                                mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
+                                        static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
+        }
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::MaxPooling_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+template class Aidge::MaxPooling_Op<1>;
+template class Aidge::MaxPooling_Op<2>;
+template class Aidge::MaxPooling_Op<3>;
+
+///////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::MaxPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                           bool ceil_mode)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, bool);
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 88a182f2ae7d51abb059faa64058fb701a033b56..f713fdaad793aebebf5047d4ebf1dfd5aca10cd1 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -75,6 +75,33 @@ void Aidge::Memorize_OpImpl::forward() {
 
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
+Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
+        mAttributes(std::make_shared<Attributes_>(
+                    attr<MemorizeAttr::ScheduleStep>(0),
+                    attr<MemorizeAttr::ForwardStep>(0),
+                    attr<MemorizeAttr::EndStep>(endStep)))
+{
+    mOutputs[1] = mOutputs[0];
+}
+
+Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+    mOutputs[1] = mOutputs[0];
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Memorize_Op::clone() const {
+    return std::make_shared<Memorize_Op>(*this);
+}
+
+
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
     ++mAttributes->template getAttr<MemorizeAttr::ScheduleStep>();
@@ -125,3 +152,9 @@ void Aidge::Memorize_Op::forward() {
     ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
     mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Memorize(const std::uint32_t endStep, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
+}
\ No newline at end of file
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index e7c50033797c7c984b6b8da69d30f005bc69e70c..71e3a4781569820267b7d623da8d73134692c05d 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -38,6 +38,10 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const {
+    return std::make_shared<MetaOperator_Op>(*this);
+}
+
 void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
     AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
@@ -59,6 +63,18 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
 }
 
+void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    if (Registrar<MetaOperator_Op>::exists({name, type()})) {
+        // A custom implementation exists for this meta operator
+        mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
+    }
+
+    // The micro-graph should always be set to the right backend, since it
+    // shares input/output tensors.
+    // Input/output tensors backend are updated here.
+    mGraph->setBackend(name, device);
+}
+
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
         return mImpl->getNbRequiredData(inputIdx);
@@ -182,3 +198,15 @@ void Aidge::MetaOperator_Op::forward() {
         mScheduler->forward(false);
     }
 }
+
+/////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
+                                  const std::shared_ptr<Aidge::GraphView>& graph,
+                                  const std::string& name)
+{
+    auto op = std::make_shared<MetaOperator_Op>(type, graph);
+    auto node = std::make_shared<Node>(op, name);
+    op->setUpperNode(node);
+    return node;
+}
\ No newline at end of file
diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fddd57d0c280f57cb20c93a9d14ce9897c0c5191
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp
@@ -0,0 +1,74 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
+                                  Aidge::DimSize_t out_channels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+                                  bool no_bias)
+{
+    auto metaOp = std::make_shared<Node>(PaddedConv_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    std::static_pointer_cast<MetaOperator_Op>(metaOp->getOperator())->getMicroGraph()->setNodesName();
+    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {out_channels}, "b");
+    }
+    return metaOp;
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<2>(const Aidge::DimSize_t, const Aidge::DimSize_t, const std::array<Aidge::DimSize_t,2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op(
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
+}
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <Aidge::DimSize_t DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConv(
+    Aidge::DimSize_t in_channels,
+    Aidge::DimSize_t out_channels,
+    Aidge::DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name,
+    const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+    const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+    const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+    bool no_bias)
+{
+    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<2>(const Aidge::DimSize_t, const Aidge::DimSize_t, const Aidge::DimSize_t (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2b0e12d9cb6389554d3d0fb4d20fe3995bf909e9
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
@@ -0,0 +1,74 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t nb_channels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+                                  bool no_bias)
+{
+    auto metaOp = std::make_shared<Node>(PaddedConvDepthWise_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    std::static_pointer_cast<MetaOperator_Op>(metaOp->getOperator())->getMicroGraph()->setNodesName();
+    addProducer(metaOp, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {nb_channels}, "b");
+    }
+    return metaOp;
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<2>(const Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op(
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
+}
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
+
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <Aidge::DimSize_t DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(
+    const Aidge::DimSize_t nb_channels,
+    Aidge::DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name,
+    const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+    const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+    const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+    bool no_bias)
+{
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<2>(const Aidge::DimSize_t, const Aidge::DimSize_t (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index 0f635ea655676e488343bb55d9de6423a997af7d..4190c10a06458036f2cd8953156b969afa51bebf 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -19,6 +19,27 @@ void Aidge::Move_OpImpl::forward() {
 
 const std::string Aidge::Move_Op::Type = "Move";
 
+Aidge::Move_Op::Move_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1)
+{
+    mImpl = std::make_shared<Move_OpImpl>(*this);
+}
+
+Aidge::Move_Op::Move_Op(const Aidge::Move_Op& op)
+    : OperatorTensor(op)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
+    }
+    else {
+        mImpl = std::make_shared<Move_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Move_Op::clone() const {
+    return std::make_shared<Move_Op>(*this);
+}
+
 void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Move_Op>::exists({mInputs[0]->getImpl()->backend(), name})) {
         SET_IMPL_MACRO(Move_Op, *this, {mInputs[0]->getImpl()->backend(), name});
@@ -28,3 +49,9 @@ void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Move(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
+}
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index ded67a11acd299e5407f0d7e74146f5bcd1bf86a..e2e32805f6fde7ab6831fe4756ca60ad42c3925a 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -23,6 +23,20 @@
 
 const std::string Aidge::Mul_Op::Type = "Mul";
 
+Aidge::Mul_Op::Mul_Op(const Aidge::Mul_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Mul_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Mul_Op::clone() const {
+    return std::make_shared<Mul_Op>(*this);
+}
+
 bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
@@ -56,3 +70,9 @@ void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Mul_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Mul(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index c66e6c84af6df299e4786bbbb73767d6ee6374f5..5b1428c160f976a043bb5cbe6fc6cb3351bab336 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -10,10 +10,62 @@
  ********************************************************************************/
 
 #include "aidge/operator/Pad.hpp"
+
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Pad_Op<DIM>::Type = "Pad";
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const {
+    return std::make_shared<Pad_Op<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
+
+        for (std::size_t dim = 0; dim < DIM; ++dim) {
+            outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+                                + inputDims[dim+2]
+                                + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+        }
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Pad_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
 template class Aidge::Pad_Op<1>;
 template class Aidge::Pad_Op<2>;
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Pad(const std::array<Aidge::DimSize_t, 2*DIM> &beginEndTuples,
+                                           const std::string& name,
+                                           const PadBorderType &borderType,
+                                           double borderValue)
+{
+    static_assert(DIM<=MaxDim, "Too many kernel dimensions required by Pad, not supported");
+    return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
\ No newline at end of file
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 2fcc46a460ffd7c7f6746dfcd108acbaafe912de..5d32a06fd01d8674d8e072f14838f3fd80d1f30a 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -35,8 +35,33 @@ void Aidge::Pop_OpImpl::forward() {
     *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
 }
 
+//////////////////////////////////////////////////////////
+
 const std::string Aidge::Pop_Op::Type = "Pop";
 
+Aidge::Pop_Op::Pop_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
+{
+    mImpl = std::make_shared<Pop_OpImpl>(*this);
+}
+
+Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Pop_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Pop_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Pop_Op::clone() const {
+    return std::make_shared<Pop_Op>(*this);
+}
+
 bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         auto inputDims = getInput(0)->dims();
@@ -67,3 +92,9 @@ void Aidge::Pop_Op::forward() {
     Operator::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
+
+///////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Pop(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 2a50f9c7bad1e40cd6e69cfc0a22632439cfe000..1602c8c2aa28e305b340888cb3a77cb4d2fc4293 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -54,4 +54,10 @@ bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Pow_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Pow(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index bdb69452ec54fb635d0cbc299336071295f37ae1..e5c4a3e9e18af8b3236b612db2b959f5ce4ec30a 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -26,6 +26,17 @@
 
 const std::string Aidge::Producer_Op::Type = "Producer";
 
+template <std::size_t DIM>
+Aidge::Producer_Op::Producer_Op(
+            const std::array<Aidge::DimSize_t, DIM>& dims,
+            bool constant)
+    : OperatorTensor(Type, {}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ProdAttr::Constant>(constant)))
+{
+    mOutputs[0]->resize(dims);
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
 
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
     : OperatorTensor(Type, {}, 1),
@@ -59,6 +70,10 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
+    return std::make_shared<Producer_Op>(*this);
+}
+
 void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Producer_Op>::exists({name})){
         SET_IMPL_MACRO(Producer_Op, *this, name);
@@ -76,3 +91,75 @@ void Aidge::Producer_Op::forward() {
 
     runHooks();
 }
+
+void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const {
+    if (mAttributes->template getAttr<ProdAttr::Constant>()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
+    }
+    OperatorTensor::setOutput(outputIdx, data);
+}
+
+/////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Producer(const std::array<Aidge::DimSize_t, DIM> &dims,
+        const std::string& name,
+        bool constant)
+{
+    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
+    return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Producer<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<4>(const std::array<Aidge::DimSize_t, 4>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<5>(const std::array<Aidge::DimSize_t, 5>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<6>(const std::array<Aidge::DimSize_t, 6>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<7>(const std::array<Aidge::DimSize_t, 7>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<8>(const std::array<Aidge::DimSize_t, 8>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<9>(const std::array<Aidge::DimSize_t, 9>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<10>(const std::array<Aidge::DimSize_t, 10>&, const std::string&, bool);
+
+std::shared_ptr<Aidge::Node> Aidge::Producer(const std::shared_ptr<Aidge::Tensor> tensor,
+            const std::string& name,
+            bool constant)
+{
+    return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
+}
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::addProducer(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, DIM>& dims,
+        const std::string& extension)
+{
+    AIDGE_ASSERT(inputIdx < gk_IODefaultIndex, "Input index too high. Cannot create Producer");
+    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
+    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
+    auto prod = Producer(dims, prodName);
+    prod->addChild(otherNode, 0, inputIdx);
+    otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
+    return prod;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<1>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 1>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<2>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 2>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<3>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 3>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<4>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 4>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<5>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 5>& dims,
+        const std::string& extension);
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index 7b945a7d62ab0ef7f73a25f6f74430e725d17b48..03f9e0679facc452d5a8bdc71707a824240f15ac 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -19,7 +19,27 @@
 
 const std::string Aidge::ReLU_Op::Type = "ReLU";
 
+Aidge::ReLU_Op::ReLU_Op(const Aidge::ReLU_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ReLU_Op::clone() const {
+    return std::make_shared<ReLU_Op>(*this);
+}
+
 void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ReLU_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+/////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ReLU(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 96f2f855f46275e167acb1300434f8bcdbdd7d3e..d80525adc68f9692a042fdca2ce6869ac0600f5a 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -26,6 +26,28 @@
 
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
+Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, Aidge::DimSize_t keep_dims)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ReduceMeanAttr::Axes>(axes),
+        attr<ReduceMeanAttr::KeepDims>(keep_dims)))
+{}
+
+Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ReduceMean_Op::clone() const {
+    return std::make_shared<ReduceMean_Op>(*this);
+}
+
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // make Axes attribute positive
@@ -56,4 +78,13 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(ReduceMean_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes,
+                                        Aidge::DimSize_t keep_dims,
+                                        const std::string& name) {
+    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
 }
\ No newline at end of file
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index cc31eeea758853a4183569d58412c427bd32006c..5139a0b0c98b11a0cbf6770397be56c830d0aa49 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -28,8 +28,35 @@ void Aidge::Reshape_OpImpl::forward() {
     op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
 }
 
+//////////////////////////////////////////////////
+
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
+Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ReshapeAttr::Shape>(shape),
+        attr<ReshapeAttr::AllowZero>(allowzero)))
+{
+    mImpl = std::make_shared<Reshape_OpImpl>(*this);
+}
+
+Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Reshape_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Reshape_Op::clone() const {
+    return std::make_shared<Reshape_Op>(*this);
+}
+
 bool Aidge::Reshape_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
@@ -108,3 +135,12 @@ void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Reshape(const std::vector<std::int64_t>& shape,
+                            bool allowzero,
+                            const std::string &name)
+{
+    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
+}
\ No newline at end of file
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 0d407d4f97a17b8a89378bc83c1039423d9b2949..f3a69848ebd3cb7dbfb43788d16030e21e071b9c 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -25,6 +25,35 @@
 
 const std::string Aidge::Resize_Op::Type = "Resize";
 
+Aidge::Resize_Op::Resize_Op()
+    : OperatorTensor(Type,
+        {InputCategory::Data,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData},
+        1) {}
+
+/**
+ * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+ * but not its input tensors (the new operator has no input associated).
+ * @param op Operator to copy.
+ */
+
+Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
+    : OperatorTensor(op)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const {
+    return std::make_shared<Resize_Op>(*this);
+}
+
 bool Aidge::Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
     if ((getInput(1) && !getInput(1)->undefined())
@@ -89,10 +118,10 @@ bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
             std::shared_ptr<Tensor> fallback;
             const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
 
-            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {            
+            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
                 outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
             }
-            
+
             mOutputs[0]->resize(outDims);
             return true;
         }
@@ -101,14 +130,14 @@ bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
         }
     }
 
-    return false; 
+    return false;
 }
 
 void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Resize_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 
-    // By default, automatically set backend for all inputs: roi, scales and sizes 
+    // By default, automatically set backend for all inputs: roi, scales and sizes
     if(getInput(1)) {
         getInput(1)->setBackend(name, device);
     }
@@ -119,3 +148,9 @@ void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
         getInput(3)->setBackend(name, device);
     }
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index dc5e272210feb09fd5dac6ba4b16f9ba8dc93bf0..a53695b58aab9ea8a50e15638b4c50d42cf444dd 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -20,7 +20,40 @@
 
 const std::string Aidge::Scaling_Op::Type = "Scaling";
 
+Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ScalingAttr::ScalingFactor>(scalingFactor),
+        attr<ScalingAttr::QuantizedNbBits>(nbBits),
+        attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
+{}
+
+Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Scaling_Op::clone() const {
+    return std::make_shared<Scaling_Op>(*this);
+}
+
 void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Scaling_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Scaling(float scalingFactor,
+                                     std::size_t quantizedNbBits,
+                                     bool isOutputUnsigned,
+                                     const std::string& name)
+{
+    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor, quantizedNbBits, isOutputUnsigned), name);
 }
\ No newline at end of file
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 39f5e2fe09b7ac750b8ea9d48d17fc2e97013c1a..f2ad1005907b71ee279b9d9bc9853b667108855c 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -30,8 +30,35 @@ void Aidge::Shape_OpImpl::forward() {
                                          end - start + 1);
 }
 
+///////////////////////////////////////////////
+
 const std::string Aidge::Shape_Op::Type = "Shape";
 
+Aidge::Shape_Op::Shape_Op(const std::int64_t start, const std::int64_t end)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ShapeAttr::Start>(start),
+        attr<ShapeAttr::End>(end)))
+{
+    mImpl = std::make_shared<Shape_OpImpl>(*this);
+}
+
+Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Shape_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Shape_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const {
+    return std::make_shared<Shape_Op>(*this);
+}
+
 bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         if (this->start() < 0)
@@ -63,3 +90,9 @@ void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Shape(const std::int64_t start, const std::int64_t end, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
+}
\ No newline at end of file
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
index ede83e291bd1670885192e3ac8f4958e185c28e2..63480ffccaaf78b2dd951c75b3830a8dfede7d99 100644
--- a/src/operator/ShiftGELU.cpp
+++ b/src/operator/ShiftGELU.cpp
@@ -21,7 +21,29 @@
 
 const std::string Aidge::ShiftGELU_Op::Type = "ShiftGELU";
 
+Aidge::ShiftGELU_Op::ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::ShiftGELU_Op::ShiftGELU_Op(const Aidge::ShiftGELU_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ShiftGELU_Op::clone() const {
+    return std::make_shared<ShiftGELU_Op>(*this);
+}
+
 void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ShiftGELU_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ShiftGELU(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
index eb77ae655354eac03fbdc0f1a84a44391795ee8c..5b0dd7ace0984c2397ef3a7bb4ef7a5526f4f288 100644
--- a/src/operator/ShiftMax.cpp
+++ b/src/operator/ShiftMax.cpp
@@ -21,7 +21,33 @@
 
 const std::string Aidge::ShiftMax_Op::Type = "ShiftMax";
 
+Aidge::ShiftMax_Op::ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::ShiftMax_Op::ShiftMax_Op(const Aidge::ShiftMax_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+/**
+ * @brief Clone the operator using its copy-constructor.
+ * @see Operator::ShiftMax_Op
+ */
+std::shared_ptr<Aidge::Operator> Aidge::ShiftMax_Op::clone() const {
+    return std::make_shared<ShiftMax_Op>(*this);
+}
+
 void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ShiftMax_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+/////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ShiftMax(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index a6edcf823695f95253d6c56e45975480909679d3..aa112378fde50c7f36c63b8c0a8d00ed0baab12b 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -20,7 +20,30 @@
 
 const std::string Aidge::Sigmoid_Op::Type = "Sigmoid";
 
+Aidge::Sigmoid_Op::Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Sigmoid_Op::Sigmoid_Op(const Aidge::Sigmoid_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Sigmoid_Op::clone() const {
+    return std::make_shared<Sigmoid_Op>(*this);
+}
+
+
 void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Sigmoid_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+///////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sigmoid(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 4fcfd587a9b3d8858b2e8a71605743c6702cb310..bd7a4750dcbb129b56c541b3e75c2ec6faa7d55a 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -11,7 +11,6 @@
 
 #include "aidge/operator/Slice.hpp"
 
-#include <cassert>
 #include <cstddef>
 #include <cstdint>
 #include <string>
@@ -28,6 +27,41 @@
 
 const std::string Aidge::Slice_Op::Type = "Slice";
 
+Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
+                        const std::vector<std::int64_t>& ends,
+                        const std::vector<std::int8_t>& axes,
+                        const std::vector<std::int64_t>& steps)
+    : OperatorTensor(Type,
+        {InputCategory::Data,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData},
+        1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SliceAttr::Starts>(starts),
+        attr<SliceAttr::Ends>(ends),
+        attr<SliceAttr::Axes>(axes),
+        attr<SliceAttr::Steps>(steps)))
+{}
+
+Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Slice_Op, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
+    return std::make_shared<Slice_Op>(*this);
+}
+
+
 bool Aidge::Slice_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
         || (getInput(2) && !getInput(2)->undefined())
@@ -177,3 +211,13 @@ void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     SET_IMPL_MACRO(Slice_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& starts,
+                                   const std::vector<std::int64_t>& ends,
+                                   const std::vector<std::int8_t>& axes,
+                                   const std::vector<std::int64_t>& steps,
+                                   const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
+}
\ No newline at end of file
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index 612c61b0f66b97eb4630214538a22154a67b80d8..f425d6fffb8934f00b1c503c1d296b8318377cb0 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -20,7 +20,34 @@
 
 const std::string Aidge::Softmax_Op::Type = "Softmax";
 
+Aidge::Softmax_Op::Softmax_Op(std::int32_t axis)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SoftmaxAttr::Axis>(axis)))
+{}
+
+Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Softmax_Op::clone() const {
+    return std::make_shared<Softmax_Op>(*this);
+}
+
 void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Softmax_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Softmax(std::int32_t axis, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
 }
\ No newline at end of file
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index af7474d8a21db9ece237440b46ecf57db9b270b4..9c56c6a2a28c6acb8c3943cd859fdbe78fd2cd1b 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -52,8 +52,37 @@ void Aidge::Split_OpImpl::forward() {
     }
 }
 
+/////////////////////////////////////////////////////
+
 const std::string Aidge::Split_Op::Type = "Split";
 
+Aidge::Split_Op::Split_Op(std::int8_t axis,
+                        Aidge::DimSize_t nbOutputs,
+                        const std::vector<Aidge::DimSize_t>& split)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SplitAttr::Axis>(axis),
+        attr<SplitAttr::Split>(split)))
+{
+    mImpl = std::make_shared<Split_OpImpl>(*this);
+}
+
+Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Split_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Split_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Split_Op::clone() const {
+    return std::make_shared<Split_Op>(*this);
+}
+
 bool Aidge::Split_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined()))
     {
@@ -120,7 +149,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
 
         return true;
     }
-    
+
     return false;
 }
 
@@ -135,5 +164,14 @@ void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     {
         mOutputs[i]->setBackend(name, device);
     }
-    
+
 }
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Split(Aidge::DimSize_t nbOutput,
+                                   std::int8_t axis,
+                                   const std::vector<Aidge::DimSize_t>& split,
+                                   const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
+}
\ No newline at end of file
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index d8ac8b8b0bf28110bd52493d7833f64e9e80fc6a..579d63b3151b0c236b34a488fc9e74a450c9f24f 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -14,13 +14,35 @@
 #include <memory>
 #include <string>
 
+#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 const std::string Aidge::Sqrt_Op::Type = "Sqrt";
 
+Aidge::Sqrt_Op::Sqrt_Op(const Aidge::Sqrt_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+
+std::shared_ptr<Aidge::Operator> Aidge::Sqrt_Op::clone() const {
+    return std::make_shared<Sqrt_Op>(*this);
+}
+
 void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Sqrt_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sqrt(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 858b32beaf9e23e8e9e7f52cfe7176afe399843c..ee4fd5b0887c5d9fafa3acd5822334dba4070aa8 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -24,6 +24,20 @@
 
 const std::string Aidge::Sub_Op::Type = "Sub";
 
+Aidge::Sub_Op::Sub_Op(const Aidge::Sub_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sub_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Sub_Op::clone() const {
+    return std::make_shared<Sub_Op>(*this);
+}
+
 bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
@@ -57,3 +71,9 @@ void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Sub_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sub(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
+}
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index c113ee6f2da52f40a66a8df04ca33ec4b85f3387..1f936b6c8c5f61d86e2832c4bee7b943fa8268a1 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -20,7 +20,29 @@
 
 const std::string Aidge::Tanh_Op::Type = "Tanh";
 
+Aidge::Tanh_Op::Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Tanh_Op::Tanh_Op(const Aidge::Tanh_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Tanh_Op::clone() const {
+    return std::make_shared<Tanh_Op>(*this);
+}
+
 void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Tanh_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Tanh(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 30372e44f8f9641734fc1109bf03a64794383a3e..bd1acee8a820ad2e3e54b7b0b21f979fc9ce1feb 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -28,8 +28,34 @@ void Aidge::TransposeImpl::forward() {
     op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
 }
 
+///////////////////////////////////////////////////
+
 const std::string Aidge::Transpose_Op::Type = "Transpose";
 
+Aidge::Transpose_Op::Transpose_Op(const std::vector<Aidge::DimSize_t> &outputDimsOrder)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
+{
+    mImpl = std::make_shared<TransposeImpl>(*this);
+}
+
+Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<TransposeImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const {
+    return std::make_shared<Transpose_Op>(*this);
+}
+
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars.");
@@ -52,3 +78,10 @@ void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
+                                           const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
+}
\ No newline at end of file
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 94c970fd3a246f0d9e1237e7cce0c15dd8e24526..2b12f33585a7388bd2411a8ae84ef43915516024 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -65,9 +65,44 @@ void Aidge::Unfold_OpImpl<DIM>::forward() {
     }
 }
 
+template class Aidge::Unfold_OpImpl<2>;
+
+/////////////////////////////////////////////////////////////
+
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
 
+template <Aidge::DimIdx_t DIM>
+Aidge::Unfold_Op<DIM>::Unfold_Op(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                    const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                    const std::array<Aidge::DimSize_t, DIM> &dilationDims)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<UnfoldAttr::StrideDims>(strideDims),
+        attr<UnfoldAttr::DilationDims>(dilationDims),
+        attr<UnfoldAttr::KernelDims>(kernelDims)))
+{
+    mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Unfold_Op<DIM>::clone() const {
+    return std::make_shared<Unfold_Op>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -103,5 +138,20 @@ void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx
     mOutputs[0]->setBackend(name, device);
 }
 
-template class Aidge::Unfold_OpImpl<2>;
-template class Aidge::Unfold_Op<2>;
\ No newline at end of file
+template class Aidge::Unfold_Op<2>;
+
+///////////////////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Unfold(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
+    return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Unfold<2>(const std::array<Aidge::DimSize_t, 2>&,
+                                  const std::string&,
+                                  const std::array<Aidge::DimSize_t, 2>&,
+                                  const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file