diff --git a/aidge_core/unit_tests/test_recipes.py b/aidge_core/unit_tests/test_recipes.py
index 8a0a470221e118fd450be7a7bf1bf6ede2df6178..c8dd4c727fbaf8224e8d04111a5054caeb5e5c99 100644
--- a/aidge_core/unit_tests/test_recipes.py
+++ b/aidge_core/unit_tests/test_recipes.py
@@ -65,7 +65,7 @@ class test_recipes(unittest.TestCase):
         graph_view.add(b1)
 
         old_nodes = graph_view.get_nodes()
-        aidge_core.fuse_mul_add(graph_view)
+        aidge_core.matmul_to_fc(graph_view)
 
         self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 2)
         self.assertTrue("MatMul0" not in [i.name() for i in graph_view.get_nodes()])
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 651a5de69596ee867a97b06ba683f49b05a41303..d77e6693b27c08da5c60f5410406a08e4863f1c4 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -36,6 +36,8 @@
 #include "aidge/nodeTester/ConditionalInterpreter.hpp"
 
 #include "aidge/operator/Add.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/operator/ArgMax.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Concat.hpp"
@@ -47,6 +49,7 @@
 #include "aidge/operator/Gather.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/GlobalAveragePooling.hpp"
+#include "aidge/operator/GridSample.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/MetaOperator.hpp"
@@ -57,6 +60,7 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/Pow.hpp"
 #include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/ReduceSum.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
 #include "aidge/operator/Resize.hpp"
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
index 62d10a6983e8cf5fd8e2730d3203bed97284e336..6c19b5355e406454a2e20bc8994d0ab04d53576a 100644
--- a/include/aidge/data/DataProvider.hpp
+++ b/include/aidge/data/DataProvider.hpp
@@ -35,6 +35,9 @@ private:
     // Desired size of the produced batches
     const std::size_t mBatchSize;
 
+    // The backend for data tensors
+    std::string mBackend;
+
     // Enable random shuffling for learning
     const bool mShuffle;
 
@@ -67,7 +70,7 @@ public:
      * @param database database from which to load the data.
      * @param batchSize number of data samples per batch.
      */
-    DataProvider(const Database& database, const std::size_t batchSize, const bool shuffle = false, const bool dropLast = false);
+    DataProvider(const Database& database, const std::size_t batchSize, const std::string& backend = "cpu", const bool shuffle = false, const bool dropLast = false);
 
 public:
     /**
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index b4c5de2ebe5c18e91da8fe4474ea74cf338b0fa6..c7b712be460a748df12447b15883eff58abbf690 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -312,6 +312,18 @@ class Tensor : public Data,
      */
     Tensor sqrt() const;
 
+    /**
+     * @brief Element-wise abs operation for Tensor.
+     * @return Tensor
+     */
+    Tensor abs() const;
+
+    /**
+     * @brief Mean operation for Tensor.
+     * @return Tensor
+     */
+    Tensor mean() const;
+
     ~Tensor() noexcept;
 
 public:
@@ -346,22 +358,7 @@ public:
      * @param copyFrom If true (default), move data from previous backend/device
      * to the new one. Previous data is lost otherwise.
      */
-    inline void setBackend(const std::string &name, DeviceIdx_t device = 0, bool copyFrom = true) {
-        if (mImpl) {
-            if (mImpl->device() != std::make_pair(name, device)) {
-                // Backend change: create new impl, copy from old to new and replace
-                // impl
-                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
-                if (copyFrom) {
-                    newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
-                }
-                setImpl(newImpl);
-            }
-        }
-        else {
-            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
-        }
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0, bool copyFrom = true);
 
     /**
      * @brief Get a list of available backends.
@@ -464,12 +461,16 @@ public:
      */
     constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; }
 
+    inline DimSize_t dim(DimIdx_t idx) const { return mDims[idx]; }
+
     /**
      * @brief Get strides of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
     constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; }
 
+    inline DimSize_t stride(DimIdx_t idx) const { return mStrides[idx]; }
+
     /**
      * @brief Return true if Tensor is contiguous in memory.
      * @return bool
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 73dc7950daec42b803a3e14f596725a6ede34658..17bd3b1e9aeece2c80dab8c1aa1cba6498cc730f 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -96,6 +96,12 @@ public:
      */
     inline void setName(const std::string &name) { mName = name; }
 
+    /**
+     * @brief Set the name of every Node based on the current GraphView name in
+     * following form: "name_type#type-id"
+     */
+    void setNodesName() const;
+
     /**
      * @brief Save the GraphView as a Mermaid graph in a .md file at the
      * specified location.
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index 31bae71e9b433d1b82ffe62d93837f440c8a936f..951aa6b29d73d9055cf9f13c8ddc6313cb506879 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -29,6 +29,12 @@ namespace Aidge {
 class SinglePassGraphMatching {
 public:
     struct Context {
+        Context();
+        Context(const Context&); // explicitly define Context copy constructor
+                                 // to avoid automatic inlining
+        Context& operator=(const Context&);
+        ~Context() noexcept;
+
         std::string query;
         bool firstSequence = true;
         bool firstNode = true;
@@ -37,6 +43,7 @@ public:
         bool singleOutput = true;
         IOIndex_t edgeLeftIdx = 0;
         IOIndex_t edgeRightIdx = 0;
+        NodePtr startNode;
 
         // For check & debug purpose:
         size_t depth = 0;
@@ -52,44 +59,36 @@ public:
         mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
         mutable NodePtr startNode;
 
-        MatchingResult() {
-            graph = std::make_shared<GraphView>();
-        }
+        MatchingResult();
 
-        MatchingResult(const MatchingResult& result) {
-            graph = std::make_shared<GraphView>(*(result.graph.get()));
-            anchors = result.anchors;
-            startNode = result.startNode;
-        }
-
-        MatchingResult& operator=(const MatchingResult& result) {
-            graph = std::make_shared<GraphView>(*(result.graph.get()));
-            anchors = result.anchors;
-            startNode = result.startNode;
-            return *this;
-        }
+        MatchingResult(const MatchingResult& other);
+        MatchingResult& operator=(const MatchingResult& other);
+        ~MatchingResult() noexcept;
     };
 
     SinglePassGraphMatching(std::shared_ptr<GraphView> graph) : mGraph(graph) {}
+    SinglePassGraphMatching(const SinglePassGraphMatching& other);
+    SinglePassGraphMatching& operator=(const SinglePassGraphMatching& other);
+    ~SinglePassGraphMatching() noexcept;
 
     /**
      * Matches a query by direct, single pass parse and match.
      * The returned matches are non-ordered and therefore stored in a std::set.
-     * 
+     *
      * Some rules:
      * - The first node of the first sequence is the root node and cannot be optional
      *   WRONG: Conv?->ReLU (will throw an error)
      *   GOOD: ReLU<-Conv?
-     * 
+     *
      * - The first node of any further sequence must be an existing anchor
      *   (the anchor cannot be in the middle of the sequence)
      *   WRONG: Conv->ReLU;Pad->Conv (will throw an error)
      *          Pad->Conv;Conv->ReLU (will throw an error)
      *   GOOD: Conv#->ReLU;Conv#<-Pad
      *         Pad->Conv#;Conv#->ReLU
-     * 
+     *
      * - Any node already matched cannot be matched again (except for anchors)
-     * 
+     *
      * - By default, an edge matches the first output to the first input.
      *   EXAMPLE: ReLU->Conv is equivalent to ReLU-0-0>Conv
      *            To match the second input, use ReLU-0-1>Conv (or ReLU-1>Conv)
@@ -97,14 +96,14 @@ public:
      *            To match any input and/or any output, use *, like ReLU-1-*>Conv
      *            or ReLU-*-0>Conv or ReLU-*-*>Conv
      *            The same is true for the "<-" edge syntax.
-     * 
+     *
      * - When several nodes could match for a given node query, the first one
-     *   not already in the matching result is matched, following the 
+     *   not already in the matching result is matched, following the
      *   childs/parents ordered node list
      *   EXAMPLE: Producer in "Conv<*-Producer" will match the weights Producer first
      *   EXAMPLE: Producer in "Conv#<1-.;Conv#<*-Producer" will match the bias Producer
      *            because the weights Producer has already been matched
-     * 
+     *
      * - One always matches a sub-graph: additional connections can exist anywhere
      *   in the matched sub-graph
      *   EXAMPLE: "Add<*-." will match the Add operator and its first input, any
@@ -112,7 +111,7 @@ public:
      *   EXAMPLE: "(Add#<*-.)+" will match the Add operator and all of its inputs
      *            Note that the anchor is required since we intend to match several
      *            inputs of the same node!
-     * 
+     *
      * - In Aidge, a node output can be connected to multiple other nodes. In
      *   your query, you can allow it or not, with the "~" or "-" modifier.
      *   EXAMPLE: "Conv->ReLU" will match the Conv that are **only** connected
@@ -121,7 +120,7 @@ public:
      *            if they are also connected to other nodes at the same output #0.
      *   When implementing a match & replace recipe, beware that you don't break
      *   branches in the middle of your matching result if you use "~"!
-     * 
+     *
      * - The matching results can be overlapping, meaning that some nodes may be
      *   found in multiple results. Some results may be subsets of other results.
      *   EXAMPLE: assume graph Conv#1->ReLU#1->Conv#2->ReLU#2
@@ -129,17 +128,27 @@ public:
      *            Conv#1->ReLU#1->Conv#2->ReLU#2 and Conv#2->ReLU#2
      *   To avoid this behavior, set the disjoint argument to true. In this case,
      *   only Conv#1->ReLU#1->Conv#2->ReLU#2 will be kept in the example above.
-     * 
+     *
      * - Whitespaces are allowed anywhere in the query
-     * 
+     *
      * QUERY = SEQ | NODE_OR_BLOCK (';' (SEQ | NODE_OR_BLOCK))*
-     * 
+     *
      * @param query The query to search.
      * @param disjoint If true, only keep the longuest disjoint (non-overlapping) matches.
-     * @return Set of matches, each stored in a MatchingResult struct.
+     * @return std::set<MatchingResult> Set of matches, each stored in a MatchingResult struct.
     */
     std::set<MatchingResult> match(const std::string& query, bool disjoint = false);
 
+    /**
+     * @brief Same as match() but with a mandatory start node.
+     * 
+     *
+     * @param query The query to search.
+     * @return MatchingResult MatchingResult struct, with empty graph if query
+     * is not found, or the graph corresponding to the query.
+     */
+    MatchingResult matchFrom(NodePtr startNode, const std::string& query);
+
     /**
      * Filter to keep only the longuest disjoint (non-overlapping) matches.
     */
@@ -160,7 +169,7 @@ private:
     bool matchNodeOrBlock(Context& ctx, std::set<MatchingResult>& matches);
 
     /**
-     * BLOCK = '(' SEQ | PAR | BLOCK | ALT | NODE ')'
+     * BLOCK = '(' SEQ | PAR | ALT | BLOCK | NODE ')'
     */
     bool matchBlock(Context& ctx, std::set<MatchingResult>& matches);
 
@@ -192,7 +201,7 @@ private:
      * TYPE = [A-Za-z0-9_]+
      * ANCHOR = [A-Za-z0-9_]+
      * LAMBDA = [A-Za-z0-9_]+
-     * NODE = (TYPE | '.') ('#' ANCHOR)? ('[' LAMBDA ']')?
+     * NODE = ((TYPE | '.') ('#' ANCHOR)? ('[' LAMBDA ']')?) | '$'
     */
     bool matchNode(Context& ctx, std::set<MatchingResult>& matches);
 
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index f694a1234b6037a0ae75a89380af9747765e290c..3be17d6d21d18d63e75e384f2c6e037452db3a82 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -17,6 +17,7 @@
 #include <set>
 #include <string>
 #include <vector>
+#include <deque>
 #include <utility>
 
 #ifdef PYBIND
@@ -63,6 +64,9 @@ private:
   std::vector<std::vector<IOIndex_t>> mIdInChildren; /** List of input index for each Node linked to each output of the Node. */
   std::vector<IOIndex_t> mIdOutParents; /** index of the output linked to each input of the Node. Default: gk_IODefaultIndex. */
 
+  std::deque<std::function<bool()>> mForward;
+  std::deque<std::function<bool()>> mBackward;
+
 public:
   Node() = delete;
 
@@ -79,6 +83,22 @@ public:
     return lhs.shared_from_this() == rhs.shared_from_this();
   }
 
+  void addBeforeForward(std::function<bool()> func) {
+    mForward.push_front(func);
+  }
+
+  void addAfterForward(std::function<bool()> func) {
+    mForward.push_back(func);
+  }
+
+  void addBeforeBackward(std::function<bool()> func) {
+    mBackward.push_front(func);
+  }
+
+  void addAfterBackward(std::function<bool()> func) {
+    mBackward.push_back(func);
+  }
+
 public:
   ///////////////////////////////////////////////////////
   //        FUNCTIONAL DESCRIPTION
diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp
index bbbab330df57f9512f0d9ffd3594eddd5f7a7536..f8ae20dc3ce3fdc004d2550915255c885b144ad7 100644
--- a/include/aidge/graph/OpArgs.hpp
+++ b/include/aidge/graph/OpArgs.hpp
@@ -36,6 +36,10 @@ public:
     OpArgs(const std::shared_ptr<Node>& node_)
      : mNode(node_) {assert(mNode && "The Node provided should not be a nullptr.");}
 
+    OpArgs(const OpArgs&);
+    OpArgs& operator=(const OpArgs&);
+    ~OpArgs() noexcept;
+
     inline std::shared_ptr<Node> node() const noexcept {
         return mNode;
     }
diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3c2f1bb388cf064be379f476f1d2df4491b57637
--- /dev/null
+++ b/include/aidge/operator/Abs.hpp
@@ -0,0 +1,71 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ABS_H_
+#define AIDGE_CORE_OPERATOR_ABS_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Abs_Op : public OperatorTensor,
+    public Registrable<Abs_Op, std::string, std::shared_ptr<OperatorImpl>(const Abs_Op&)> {
+public:
+    static const std::string Type;
+
+    Abs_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Abs_Op(const Abs_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Abs_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Abs_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Abs_Op>(*this);
+    }
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Abs(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Abs_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ABS_H_ */
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 0e709afe9f175443a28947be7f4c3f5b01f5e362..97db476729abc07985b16de62084be5fce603bc9 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -28,13 +28,7 @@ class Add_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Add_Op(const IOIndex_t nbIn)
-        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
-    {
-        if (nbIn == 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-        }
-    }
+    Add_Op(const IOIndex_t nbIn);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -46,9 +40,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Add_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Add_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -72,9 +64,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
-}
+std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..04a2fab1ed3569da161049ecece85a6e906e1cd8
--- /dev/null
+++ b/include/aidge/operator/And.hpp
@@ -0,0 +1,81 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_AND_H_
+#define AIDGE_CORE_OPERATOR_AND_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @brief Tensor element-wise logical and operation.
+ */
+class And_Op : public OperatorTensor,
+    public Registrable<And_Op, std::string, std::shared_ptr<OperatorImpl>(const And_Op&)> {
+public:
+    static const std::string Type;
+
+    /**
+     * @brief Compute element-wise and operation on two given inputs.
+     * @details Supports broadcasting of both operands.
+     */
+    And_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    And_Op(const And_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(And_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::And_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<And_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input_1", "data_input_2"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> And(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<And_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_AND_H_ */
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1b11e211d23563d75bf943a96fa26bc84a3aa4b8
--- /dev/null
+++ b/include/aidge/operator/ArgMax.hpp
@@ -0,0 +1,135 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ARGMAX_H_
+#define AIDGE_CORE_OPERATOR_ARGMAX_H_
+
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ArgMaxAttr { Axis, KeepDims, SelectLastIndex };
+
+/**
+ * @brief Reduces the given dimension by replacing it with the index of its maximum value.
+*/
+class ArgMax_Op : public OperatorTensor,
+                public Registrable<ArgMax_Op, std::string, std::shared_ptr<OperatorImpl>(const ArgMax_Op &)> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<ArgMaxAttr,
+                                        std::int32_t,
+                                        bool,
+                                        bool>;
+    template <ArgMaxAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ArgMax_Op() = delete;
+
+    /**
+     * @brief Constructor for the ArgMax operator.
+     * @param[in] axis Axis along which the operation is performed.
+     * @param[in] keep_dims If true, keep a dimension of size 1 in place of the
+     * reduced axis; if false, remove that dimension entirely.
+     * @param[in] select_last_index If several maxima exist, return the last
+     * index if true, the first index otherwise.
+     */
+    ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ArgMaxAttr::Axis>(axis),
+            attr<ArgMaxAttr::KeepDims>(keep_dims),
+            attr<ArgMaxAttr::SelectLastIndex>(select_last_index)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ArgMax_Op(const ArgMax_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ArgMax_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ArgMax_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::Axis>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::KeepDims>(); }
+    inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
+
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Compute the max value of a Tensor over the provided axes. Dimensions
+ * may be reduced by erasing the provided axis or not.
+ *
+ * @param axis Dimension over which data max should be computed.
+ * @param keep_dims Whether or not reduced dimensions are to be erased.
+ * @param select_last_index Whether to select the last index of max elements in case there are many maximums.
+ * By default, the first maximum element's index is returned.
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
+ */
+inline std::shared_ptr<Node> ArgMax(std::int32_t axis=0,
+                                    bool keep_dims=true,
+                                    bool select_last_index=false,
+                                    const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name);
+
+}
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 920829473d856b2a4c14fc0859abcd4c3b70277a..b2f4ce92580afddcc7aa3627ea0ef89d4ac3ffee 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -64,9 +64,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::AvgPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<AvgPooling_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
@@ -93,12 +91,9 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
-}
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 08d1f6a88d394e34dd6e351f500429113a52c9fa..7f1f63c68a512c4b6a59a515d6130afe9696a8c2 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -64,9 +64,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::BatchNorm_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<BatchNorm_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -103,11 +101,11 @@ std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
                                        const float epsilon = 1.0e-5F,
                                        const float momentum = 0.1F,
                                        const std::string& name = "");
+}  // namespace Aidge
 
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
-}  // namespace Aidge
 
 namespace {
 template <>
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 291669b7c57c14a77ffa6b40fa2aefab8d281fc7..fd12f551a2251f3dfe8ea0a0d0528d9dad742e42 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -86,9 +86,8 @@ public:
 };
 
 
-inline std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
-}
+std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "");
+
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index ab14bf527dd9949f3bb2b6157619e58c7c7580ee..46cd3a5a328984bde7e537d984b30e0774a3d259 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -50,40 +50,19 @@ private:
 public:
     Concat_Op() = delete;
 
-    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
-        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ConcatAttr::Axis>(axis)))
-    {
-        if (nbIn == 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-        }
-        mImpl = std::make_shared<Concat_OpImpl>(*this);
-    }
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Concat_Op(const Concat_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Concat_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Concat_OpImpl>(*this);
-        }
-    }
+    Concat_Op(const Concat_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Concat_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Concat_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -100,9 +79,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
-}
+std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "");
 }
 
 namespace {
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index e89c94f968ab89f43e6ef2d95a40a6f557cc41c7..7366472d24b78b58aab589ea2b3ccd045e4a5ea7 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -140,22 +140,13 @@ public:
  * @return std::shared_ptr<Node> A Node containing the operator.
  */
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
+std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   DimSize_t outChannels,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                  bool noBias = false) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
-    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    if (!noBias) {
-        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
-    }
-    return conv;
-}
+                                  bool noBias = false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 1acf240bfcdd256953cd96b92e3622a265aafa0b..63d8e8419b47279c51783db057b5b1a63c7d0884 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -105,21 +105,12 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
+std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                           bool noBias=false) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
-    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    if (!noBias) {
-        addProducer(convDW, 2, {nbChannels}, "b");
-    }
-    return convDW;
-}
+                                           bool noBias=false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..72ff83834962c1860b135a4187e72199b04361db
--- /dev/null
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -0,0 +1,95 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
+#define AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class DepthToSpace_OpImpl : public OperatorImpl {
+public:
+    DepthToSpace_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class DepthToSpaceAttr { BlockSize, Mode };
+
+
+class DepthToSpace_Op : public OperatorTensor,
+                public Registrable<DepthToSpace_Op,
+                    std::string,
+                    std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)> {
+public:
+    static const std::string Type;
+    enum class Mode { DCR, CRD };
+
+private:
+    using Attributes_ = StaticAttributes<DepthToSpaceAttr, std::uint32_t, Mode>;
+    template <DepthToSpaceAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    DepthToSpace_Op() = delete;
+
+    DepthToSpace_Op(const std::uint32_t blockSize, const Mode mode = Mode::CRD);
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    DepthToSpace_Op(const DepthToSpace_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::DepthToSpace_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<DepthToSpaceAttr::BlockSize>(); }
+    inline Mode& mode() const { return mAttributes->template getAttr<DepthToSpaceAttr::Mode>(); }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
+                                    const DepthToSpace_Op::Mode mode = DepthToSpace_Op::Mode::CRD,
+                                    const std::string& name = "");
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
+}
+
+#endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 3edb4a28851cffe060886a4660d6b524eb9b814a..b16a5e6733e8846b05e3e491cf5bc7f793d97f1c 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -66,9 +66,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Div(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
-}
-}
+std::shared_ptr<Node> Div(const std::string& name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_DIV_H_ */
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index f615fedeef6fea59d2177cf886e8d910f064f5c2..b6cc8f30c0fff3366cb1d3fea678e4cad8f9cb10 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -35,23 +35,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Erf_Op(const Erf_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Erf_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Erf_Op(const Erf_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Erf_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Erf_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
@@ -63,9 +53,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Erf(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
-}
+std::shared_ptr<Node> Erf(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ERF_H_ */
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 01da37a05414c5994ace767770e7c26fc8cd4646..31378532e28c05971e4e3eb5778d4821ce2b6fde 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -53,9 +53,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::FC_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<FC_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
@@ -63,6 +61,13 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    DimSize_t inChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Fully Connected (FC) operator has no weight Tensor associated so no specific number of input channel imposed.");
+        }
+        return getInput(1)->template dims<2>()[1];
+    }
+
     DimSize_t outChannels() const {
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Fully Connected (FC) operator has no weight Tensor associated so no specific number of output channel imposed.");
@@ -78,15 +83,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
-    addProducer(fc, 1, {outChannels, inChannels}, "w");
-    if (!noBias) {
-        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
-    }
-    return fc;
-}
+std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index caf904e870425c000687ccd95397c92744020eec..aebe3879b94fd13c8226fffe42e513715d8e3e5a 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -67,25 +67,13 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Fold_Op(const Fold_Op<DIM> &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Fold_Op(const Fold_Op<DIM> &op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Fold_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Fold_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -106,15 +94,11 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
+std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
-    return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
-}
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Fold(
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 5f3917e486e2e2188bfd23bd58a13b51d5fc7a59..f2e3b0fe8c063a5eec5e0c2140c3b7eabf3ee68a 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -55,39 +55,19 @@ public:
 
     Gather_Op(std::int8_t axis,
               const std::vector<int64_t>& indices,
-              const std::vector<DimSize_t>& gatheredShape)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
-        mAttributes(std::make_shared<Attributes_>(
-            attr<GatherAttr::Axis>(axis),
-            attr<GatherAttr::Indices>(indices),
-            attr<GatherAttr::GatheredShape>(gatheredShape)))
-    {
-        mImpl = std::make_shared<Gather_OpImpl>(*this);
-    }
+              const std::vector<DimSize_t>& gatheredShape);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Gather_Op(const Gather_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Gather_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Gather_OpImpl>(*this);
-        }
-    }
+    Gather_Op(const Gather_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Gather_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Gather_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -107,9 +87,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
-}
+std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 8196c4268e669001d99f25ed2cead546e1141aa7..41516a39723249b5b5c715a66ce3398dff8e65b1 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -35,43 +35,23 @@ private:
     const std::shared_ptr<DynamicAttributes> mAttributes;
 
 public:
-    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut)
-        : OperatorTensor(type, inputsCategory, nbOut)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut);
 
-    GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
-        : OperatorTensor(type, [nbData, nbParam]() {
-                                std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
-                                inputsCategory.resize(nbData + nbParam, InputCategory::Param);
-                                return inputsCategory;
-                            }(), nbOut),
-          mAttributes(std::make_shared<DynamicAttributes>())
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    GenericOperator_Op(const GenericOperator_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
-    }
+    GenericOperator_Op(const GenericOperator_Op& op);
 
-    ~GenericOperator_Op() = default;
+    ~GenericOperator_Op() noexcept;
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::GenericOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<GenericOperator_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 public:
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -111,10 +91,8 @@ public:
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
-                                             const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
-}
+std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
+                                             const std::string& name = "");
 
 /**
  * @brief Fictive custom operator not associated with any implementation.
@@ -126,10 +104,8 @@ inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std:
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
-                                             const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
-}
+std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
+                                             const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 8bb738e8b57598e4256d3850fc791976e73c834c..734e12344fed4cd25dd41e91dc8cfb18fea4fd45 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -39,18 +39,9 @@ public:
 
   GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
-  GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op)
-      : OperatorTensor(op) {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-  }
+  GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op);
 
-  std::shared_ptr<Operator> clone() const override {
-    return std::make_shared<GlobalAveragePooling_Op>(*this);
-  }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -64,11 +55,8 @@ public:
   }
 };
 
-inline std::shared_ptr<Node>
-GlobalAveragePooling(const std::string &name = "") {
-  return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(),
-                                name);
-}
+std::shared_ptr<Node> GlobalAveragePooling(const std::string &name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GLOBAL_AVERAGE_POOLING_H_ */
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..81900824ed0d26572e593982fa21ed900eda88ee
--- /dev/null
+++ b/include/aidge/operator/GridSample.hpp
@@ -0,0 +1,93 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_
+#define AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_
+
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+
+namespace Aidge {
+
+enum class GridSampleAttr { Mode, PaddingMode, AlignCorners };
+
+class GridSample_Op : public OperatorTensor,
+	public Registrable<GridSample_Op, std::string, std::shared_ptr<OperatorImpl>(const GridSample_Op&)> {
+
+public:
+	static const std::string Type;
+
+	enum class Mode { Linear, Nearest, Cubic };
+	enum class PaddingMode { Zeros, Border, Reflection };
+
+private:
+	using Attributes_ = StaticAttributes<GridSampleAttr, Mode, PaddingMode, bool>;
+	template <GridSampleAttr e>
+	using attr = typename Attributes_::template attr<e>;
+	const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+	GridSample_Op(Mode mode = Mode::Linear,
+			PaddingMode paddingMode = PaddingMode::Zeros,
+			bool alignCorners = false);
+
+	GridSample_Op(const GridSample_Op& other);
+	~GridSample_Op() noexcept;
+
+public:
+
+	std::shared_ptr<Operator> clone() const override;
+
+	bool forwardDims(bool /*allowDataDependencies*/ = false) override final;
+
+	void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+	inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+	inline Mode mode() const { return mAttributes->template getAttr<GridSampleAttr::Mode>(); }
+	inline PaddingMode paddingMode() const { return mAttributes->template getAttr<GridSampleAttr::PaddingMode>(); }
+	inline bool alignCorners() const { return mAttributes->template getAttr<GridSampleAttr::AlignCorners>(); }
+
+	static const std::vector<std::string> getInputsName() {
+		return {"data_input", "grid_field"};
+	}
+	static const std::vector<std::string> getOutputsName() {
+		return {"data_output"};
+	}
+};
+
+std::shared_ptr<Node> GridSample(
+                        typename GridSample_Op::Mode mode = GridSample_Op::Mode::Linear,
+                        typename GridSample_Op::PaddingMode paddingMode = GridSample_Op::PaddingMode::Zeros,
+                        bool alignCorners = false,
+                        const std::string& name = "");
+
+} // namespace Aidge
+
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
+    "mode",
+    "padding_mode",
+    "align_corners"
+};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_ */
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index e07df59d888993cb33da9c20393d897ab9cf1804..622d6290af55ef5a717c6f5763ade5a2750fb9f0 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -27,8 +27,6 @@
 
 namespace Aidge {
 
-
-
 /**
  * @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes.
  * This Operator has no Implementation, it just forward its input Tensor.
@@ -41,29 +39,20 @@ class Identity_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Identity_Op()
-        : OperatorTensor(Type, {InputCategory::Data}, 1)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    Identity_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Identity_Op(const Identity_Op& op)
-        : OperatorTensor(op)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
-    }
+    Identity_Op(const Identity_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Identity_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Identity_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
 
@@ -75,9 +64,7 @@ public:
      * @return true Input has dimensions.
      * @return false Input has no dimensions or is a nullptr.
      */
-    bool dimsForwarded() const override final {
-        return mInputs[0] ? (mInputs[0]->undefined() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
-    }
+    bool dimsForwarded() const override final;
 
 
     void forward() override final;
@@ -99,9 +86,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Identity(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
-}
+std::shared_ptr<Node> Identity(const std::string& name = "");
+
 }
 
 #endif /* AIDGE_CORE_OPERATOR_IDENTITY_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 3057b99f70fa3693f7e434be29dcd40fb98d4bea..30d171eab3ee54864aae48f445e4d0f04792dd31 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -17,7 +17,6 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -54,31 +53,15 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    LeakyReLU_Op(const LeakyReLU_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    LeakyReLU_Op(const LeakyReLU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::LeakyReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<LeakyReLU_Op>(*this);
-    }
-
+    std::shared_ptr<Operator> clone() const override;
 
-
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
@@ -91,9 +74,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
-}
+std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
 }
 
 namespace {
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index d4010471c9af853556dbe1d60c8585d12f8fc638..c6a9ec4c8d59800cdbcc3f0229acdbbb436cd732 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -36,23 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Ln_Op(const Ln_Op& op)
-        : OperatorTensor(op)
-    {
-       if (op.mImpl){
-            SET_IMPL_MACRO(Ln_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Ln_Op(const Ln_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Ln_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Ln_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -65,9 +55,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Ln(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
-}
+std::shared_ptr<Node> Ln(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_LN_H_ */
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index be460ee88bd79592e29581f6acd64813ecc39bec..f81fb7bd0a3156fcffccc10fe3d460273f353252 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -36,22 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MatMul_Op(const MatMul_Op& op) : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    MatMul_Op(const MatMul_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MatMul_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<MatMul_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
     /**
      * @brief Compute dimensions for the output Tensor following the same rules as
@@ -77,9 +68,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> MatMul(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
-}
+std::shared_ptr<Node> MatMul(const std::string& name = "");
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 7e2c68681e645133812103a94e4c39ab9d1dc970..3b7473a6a17e8ebf490941068c8245d5847e0299 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -20,7 +20,6 @@
 #include <stdexcept>   // std::runtime_error
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -51,71 +50,25 @@ private:
 public:
     MaxPooling_Op() = delete;
 
-    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+    MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                            bool ceil_mode = false)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<MaxPoolingAttr::StrideDims>(stride_dims),
-            attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-            attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
-        {}
+                            bool ceil_mode = false);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MaxPooling_Op(const MaxPooling_Op<DIM>& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    MaxPooling_Op(const MaxPooling_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MaxPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MaxPooling_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (inputsAssociated()) {
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-            std::function<float(float)> roundingFunction;
-            if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
-                roundingFunction = [](float x) { return std::ceil(x); };
-            } else {
-                roundingFunction = [](float x) { return std::floor(x); };
-            }
-
-            for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            roundingFunction(static_cast<float>(inputDims[dim+2] -
-                                                                    mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
-                                            static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
-            }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-            return true;
-        }
-        return false;
-    }
-
-
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
@@ -130,17 +83,15 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string MaxPooling_Op<DIM>::Type = "MaxPooling";
+extern template class Aidge::MaxPooling_Op<1>;
+extern template class Aidge::MaxPooling_Op<2>;
+extern template class Aidge::MaxPooling_Op<3>;
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           bool ceil_mode=false) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
-}
+                                           bool ceil_mode=false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index bb652e833ad06df37f55d3582afd0e66cc3e97c8..a1d90f06f098eb7fa2fc199b595991702daf488a 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -50,40 +50,20 @@ private:
 public:
     Memorize_Op() = delete;
 
-    Memorize_Op(const std::uint32_t endStep)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
-          mAttributes(std::make_shared<Attributes_>(
-                        attr<MemorizeAttr::ScheduleStep>(0),
-                        attr<MemorizeAttr::ForwardStep>(0),
-                        attr<MemorizeAttr::EndStep>(endStep)))
-    {
-        mOutputs[1] = mOutputs[0];
-    }
+    Memorize_Op(const std::uint32_t endStep);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Memorize_Op(const Memorize_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-        mOutputs[1] = mOutputs[0];
-    }
+    Memorize_Op(const Memorize_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Memorize_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Memorize_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -105,9 +85,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
-}
+std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 744564b4bd591d84b871a6af71c4a54589103485..69f2120d90beb727bd661628c362410066ae3cff 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -50,7 +50,7 @@ public:
     /**
      * Set the node that should be used for the scheduling.
     */
-    void setUpperNode(std::shared_ptr<Node> node) {
+    inline void setUpperNode(std::shared_ptr<Node> node) {
         mUpperNode = node;
     }
 
@@ -58,9 +58,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MetaOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MetaOperator_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     inline const std::shared_ptr<GraphView>& getMicroGraph() const noexcept {
         return mGraph;
@@ -82,17 +80,7 @@ public:
     }
 
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        if (Registrar<MetaOperator_Op>::exists({name, type()})) {
-            // A custom implementation exists for this meta operator
-            mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
-        }
-
-        // The micro-graph should always be set to the right backend, since it
-        // shares input/output tensors.
-        // Input/output tensors backend are updated here.
-        mGraph->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
@@ -118,15 +106,9 @@ public:
 
 };
 
-inline std::shared_ptr<Node> MetaOperator(const char *type,
+std::shared_ptr<Node> MetaOperator(const char *type,
                                   const std::shared_ptr<GraphView>& graph,
-                                  const std::string& name = "")
-{
-    auto op = std::make_shared<MetaOperator_Op>(type, graph);
-    auto node = std::make_shared<Node>(op, name);
-    op->setUpperNode(node);
-    return node;
-}
+                                  const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* MetaOperator_H_ */
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 51681629cbae215fd529b6e7bb568d07264dd63e..bc3348377525cdd2e5b2c030c8fc6b7cb8177e7b 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -33,43 +33,25 @@ namespace Aidge {
 
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
+extern std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                   DimSize_t out_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    // Construct micro-graph
-    auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
-
-    auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    if (!no_bias) {
-        addProducer(metaOp, 2, {out_channels}, "b");
-    }
-    return metaOp;
-}
+                                  bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
+extern std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-{
-    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
-
-    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
-}
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> PaddedConv(
+extern std::shared_ptr<Node> PaddedConv(
     DimSize_t in_channels,
     DimSize_t out_channels,
     DimSize_t const (&kernel_dims)[DIM],
@@ -77,46 +59,25 @@ inline std::shared_ptr<Node> PaddedConv(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false)
-{
-    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
-}
+    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
+std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    // Construct micro-graph
-    auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
-
-    auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
-    if (!no_bias) {
-        addProducer(metaOp, 2, {nb_channels}, "b");
-    }
-    return metaOp;
-}
+                                  bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
+std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
-{
-    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
-
-    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
-}
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -127,10 +88,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false)
-{
-    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
-}
+    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index cf5a3f188424fc52849eab580cce624ff714c729..9908911419d8ce027cdb18c4abf45a5c71be67b1 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -35,32 +35,19 @@ class Move_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Move_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {
-        mImpl = std::make_shared<Move_OpImpl>(*this);
-    }
+    Move_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Move_Op(const Move_Op& op)
-        : OperatorTensor(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
-        }
-        else {
-            mImpl = std::make_shared<Move_OpImpl>(*this);
-        }
-    }
+    Move_Op(const Move_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Move_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Move_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
@@ -72,9 +59,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Move(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
-}
-}
+std::shared_ptr<Node> Move(const std::string& name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index e61393b28fc45bf46487ac2277753dec1b297b81..35a4b7e061bba76f1e63343e9230eddddfde11ac 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -39,23 +39,13 @@ public:
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Mul_Op(const Mul_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Mul_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Mul_Op(const Mul_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Mul_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Mul_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -69,9 +59,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Mul(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
-}
+std::shared_ptr<Node> Mul(const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index f1e25b7a1f0ba3c07d656d4170a4b2d2bc045e5b..c938fc362aa1f747f5f31bea3fdb08fa851e2333 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -196,8 +196,8 @@ public:
     }
 
     inline InputCategory inputCategory(IOIndex_t idx) const {
-        AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
-        return mInputsCategory[idx];
+        // Bounds are checked by std::vector::at() below, which throws std::out_of_range on an invalid index.
+        return mInputsCategory.at(idx);
     }
 
     virtual inline bool isAtomic() const noexcept { return true; }
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 215fafb7fee10587dec38e77685d705f7c1bb980..bdb5330a6fd02693f4d75ccba06ce613d9a0dff1 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -17,10 +17,8 @@
 #include <string>
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
@@ -70,34 +68,12 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pad_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pad_Op<DIM>>(*this);
-    }
-
+    std::shared_ptr<Operator> clone() const override;
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (inputsAssociated()) {
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
-            for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
-                                    + inputDims[dim+2]
-                                    + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
-            }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-            return true;
-        }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-        return false;
-    }
-
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
@@ -113,14 +89,10 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                                           const std::string& name = "",
-                                           const PadBorderType &borderType = PadBorderType::Constant,
-                                           double borderValue = 0.0)
-{
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
-    return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
-}
+std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
+                        const std::string& name = "",
+                        const PadBorderType &borderType = PadBorderType::Constant,
+                        double borderValue = 0.0);
 
 // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index fb3b32eeacf2e199df88b6bd0256cf6cbdaa1065..41ab3c537eacc88920419cb5e0deecc4720796ba 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -44,36 +44,19 @@ private:
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    Pop_Op()
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
-    {
-        mImpl = std::make_shared<Pop_OpImpl>(*this);
-    }
+    Pop_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Pop_Op(const Pop_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Pop_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Pop_OpImpl>(*this);
-        }
-    }
+    Pop_Op(const Pop_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pop_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pop_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -92,9 +75,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Pop(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
-}
+std::shared_ptr<Node> Pop(const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index ee5c01c2121d68a7988dc686c4dbb4bbf7331c84..eaf4297fd8b3751463a20ae219af5c25ecd789ae 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -66,9 +66,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Pow(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
-}
+std::shared_ptr<Node> Pow(const std::string& name = "");
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index c91869db82206060ecc1039dce5e1784e40ffb4f..0cc94e4562f5ae1af29cc7edb307b7359de018ba 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -45,14 +45,7 @@ public:
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
-                bool constant = false)
-        : OperatorTensor(Type, {}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ProdAttr::Constant>(constant)))
-    {
-        mOutputs[0]->resize(dims);
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+                bool constant = false);
 
     /**
      * @brief Construct a new Producer_Op object from a Tensor.
@@ -82,15 +75,13 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Producer_Op(const Producer_Op&)
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Producer_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
+    inline bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
     inline bool dimsForwarded() const noexcept override final { return true; }
 
@@ -115,19 +106,11 @@ public:
         Log::debug("Basic Producer backward() function.\n");
     }
 
-    void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const override {
-        if (mAttributes->template getAttr<ProdAttr::Constant>()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
-        }
-        OperatorTensor::setOutput(outputIdx, data);
-    }
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false) {
-  static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
-  return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
-}
+std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
@@ -135,20 +118,13 @@ inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::s
   return Producer(to_array(dims), name, constant);
 }
 
-inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false) {
-  return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
-}
+std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
-    assert(inputIdx != gk_IODefaultIndex);
-    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
-    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
-    auto prod = Producer(dims, prodName);
-    prod->addChild(otherNode, 0, inputIdx);
-    otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
-    return prod;
-}
+std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode,
+            const IOIndex_t inputIdx,
+            const std::array<DimSize_t, DIM>& dims,
+            const std::string& extension);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 40b5d581d53521e6086d24c5ecc53f725dd9f252..cc714c4619a0f8eee7af03993700fed7489a6c0e 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -36,23 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ReLU_Op(const ReLU_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ReLU_Op(const ReLU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReLU_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -65,9 +55,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
-}
+std::shared_ptr<Node> ReLU(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 000607c60e4e3c85671e70a941bd11f3427333dd..43b121be2654c1dd63116075be397e421823b9b5 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -26,8 +26,11 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ReduceMeanAttr { Axes, KeepDims };
+enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes };
 
+/**
+ * @brief This operator reduces the given axes by replacing them with the mean value.
+*/
 class ReduceMean_Op : public OperatorTensor,
                 public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)> {
 
@@ -37,7 +40,8 @@ public:
 private:
     using Attributes_ = StaticAttributes<ReduceMeanAttr,
                                             std::vector<std::int32_t>,
-                                            DimSize_t>;
+                                            bool,
+                                            bool>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -45,35 +49,27 @@ private:
 public:
     ReduceMean_Op() = delete;
 
-    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ReduceMeanAttr::Axes>(axes),
-            attr<ReduceMeanAttr::KeepDims>(keep_dims)))
-    {}
+    /**
+     * @brief Constructor for the ReduceMean operator.
+     * @param[in] axes dimensions over which to perform the reduction
+     * @param[in] keep_dims if true, each reduced axis is kept as a dimension of size 1;
+     * if false, the reduced dimensions are removed completely
+     * @param[in] noop_with_empty_axes if no axes are provided and this is true, the operator does nothing;
+     * if false, the reduction is performed over all axes
+     */
+    ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ReduceMean_Op(const ReduceMean_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    ReduceMean_Op(const ReduceMean_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReduceMean_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReduceMean_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -81,7 +77,8 @@ public:
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); }
-    inline DimSize_t& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); }
 
 
     static const std::vector<std::string> getInputsName() {
@@ -101,14 +98,7 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
  */
-inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
-                                        DimSize_t keep_dims=1,
-                                        const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
-    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
 
-}
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 // template <DimSize_t DIM>
@@ -122,12 +112,16 @@ inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
 
 // template <DimIdx_t DIM>
 // const std::string ReduceMean_Op::Type = "ReduceMean";
+std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
+                                        bool keep_dims=true,
+                                        bool noop_with_empty_axes=false,
+                                        const std::string& name = "");
 
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims"};
+const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9d1220b6b2e7c1e8029ebe20b03d5501d90ae0f6
--- /dev/null
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -0,0 +1,135 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_REDUCESUM_H_
+#define AIDGE_CORE_OPERATOR_REDUCESUM_H_
+
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes };
+
+
+/**
+ * @brief This operator reduces the given axes by replacing them with the sum value.
+*/
+class ReduceSum_Op : public OperatorTensor,
+                public Registrable<ReduceSum_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<ReduceSumAttr,
+                                            std::vector<std::int32_t>,
+                                            bool,
+                                            bool>;
+    template <ReduceSumAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ReduceSum_Op() = delete;
+
+    /**
+     * @brief Constructor for the ReduceSum operator.
+     * @param[in] axes dimensions over which to perform the reduction
+     * @param[in] keep_dims if true, each reduced axis is kept as a dimension of size 1;
+     * if false, the reduced dimensions are removed completely
+     * @param[in] noop_with_empty_axes if no axes are provided and this is true, the operator does nothing;
+     * if false, the reduction is performed over all axes
+     */
+    ReduceSum_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReduceSumAttr::Axes>(axes),
+            attr<ReduceSumAttr::KeepDims>(keep_dims),
+            attr<ReduceSumAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ReduceSum_Op(const ReduceSum_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ReduceSum_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ReduceSum_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ReduceSum_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::Axes>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::NoopWithEmptyAxes>(); }
+
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Compute the sum of a Tensor over the provided axes. Reduced dimensions
+ * may either be kept (with size 1) or erased.
+ * @param axes Dimensions over which the data sum should be computed.
+ * @param keep_dims Whether reduced dimensions are kept (as size 1) rather than erased.
+ * @param noop_with_empty_axes If axes is empty: true leaves the input untouched, false reduces over all axes.
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
+ */
+inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
+                                        bool keep_dims=true,
+                                        bool noop_with_empty_axes=false,
+                                        const std::string& name = "") {
+    // FIXME: properly handle default w&b initialization in all cases
+    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceSum, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceSum_Op>(axes, keep_dims, noop_with_empty_axes), name);
+
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 29a08c76c248018fff87a5f765a0b62cbd23b6b7..5bd9b3e8d56c106803bf65dc7bf595da85558a1a 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -47,38 +47,19 @@ private:
 public:
     Reshape_Op() = delete;
 
-    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ReshapeAttr::Shape>(shape),
-            attr<ReshapeAttr::AllowZero>(allowzero)))
-    {
-        mImpl = std::make_shared<Reshape_OpImpl>(*this);
-    }
+    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Reshape_Op(const Reshape_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Reshape_OpImpl>(*this);
-        }
-    }
+    Reshape_Op(const Reshape_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Reshape_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Reshape_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -97,12 +78,9 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
-                                     bool allowzero = false,
-                                   	 const std::string &name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
-}
+std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
+                            bool allowzero = false,
+                            const std::string &name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 565affc57ae8e7b1838466733b0f5d8fa8e1a6d6..622a1ff1b191aad9f3f8045380be522d32cf2845 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -30,38 +30,20 @@ class Resize_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Resize_Op()
-        : OperatorTensor(Type,
-            {InputCategory::Data,
-                InputCategory::OptionalData,
-                InputCategory::OptionalData,
-                InputCategory::OptionalData},
-            1) {}
+    Resize_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-
-    Resize_Op(const Resize_Op& op)
-        : OperatorTensor(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Resize_Op(const Resize_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Resize_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Resize_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -77,10 +59,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Resize(const std::string &name = "") {
-
-    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
-}
+std::shared_ptr<Node> Resize(const std::string &name = "");
 
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 0683a26f6e9d8ef462c2af4693f372b43c33a144..311dc0202d866253bb98285e77e6d6ea8b345e0f 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -42,36 +42,19 @@ private:
 public:
     Scaling_Op() = delete;
 
-    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ScalingAttr::ScalingFactor>(scalingFactor),
-            attr<ScalingAttr::QuantizedNbBits>(nbBits),
-            attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
-    {}
+    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Scaling_Op(const Scaling_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Scaling_Op(const Scaling_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Scaling_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Scaling_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -93,13 +76,10 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
 }
 */
-inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
+std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
                                      std::size_t quantizedNbBits=8,
                                      bool isOutputUnsigned=true,
-                                     const std::string& name = "")
-{
-    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
-}
+                                     const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 94f237726e79d8fe7824ff2c9b2f7640bbfc716f..d76a9fd069ebbda81e446e6f3486ff0ff66755bb 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -49,38 +49,19 @@ private:
 public:
     Shape_Op() = delete;
 
-    Shape_Op(const std::int64_t start, const std::int64_t end)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<ShapeAttr::Start>(start),
-            attr<ShapeAttr::End>(end)))
-    {
-        mImpl = std::make_shared<Shape_OpImpl>(*this);
-    }
+    Shape_Op(const std::int64_t start, const std::int64_t end);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Shape_Op(const Shape_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Shape_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Shape_OpImpl>(*this);
-        }
-    }
+    Shape_Op(const Shape_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Shape_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Shape_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -98,9 +79,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
-}
+std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 879edcac6a7ed9a78a2db8d82994071a6cf09635..4d3000750c2224aaea278beca4c8124e0845042e 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -32,29 +32,19 @@ class ShiftGELU_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    ShiftGELU_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ShiftGELU_Op(const ShiftGELU_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ShiftGELU_Op(const ShiftGELU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ShiftGELU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ShiftGELU_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -67,9 +57,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ShiftGELU(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
-}
+std::shared_ptr<Node> ShiftGELU(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SHIFTGELU_H_ */
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index f171130213b2e51ca8fc9905d93944198f849ce7..d75e6559f5f4df9a1010d65ba97529e6165ae42f 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -32,29 +32,19 @@ class ShiftMax_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    ShiftMax_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ShiftMax_Op(const ShiftMax_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ShiftMax_Op(const ShiftMax_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ShiftMax_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ShiftMax_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -67,9 +57,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ShiftMax(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
-}
+std::shared_ptr<Node> ShiftMax(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SHIFTMAX_H_ */
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index ae82d4a3a2d29755bba22b9a4194284310ac4f84..b3204240cd130251fe8abe7d50bdad9b92b7558c 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -30,30 +30,11 @@ class Sigmoid_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    Sigmoid_Op();
 
-    /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
-     */
-    Sigmoid_Op(const Sigmoid_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sigmoid_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sigmoid_Op>(*this);
-    }
+    Sigmoid_Op(const Sigmoid_Op& op);
 
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -65,9 +46,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sigmoid(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
-}
+std::shared_ptr<Node> Sigmoid(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 04a67fe98f7682737bff6df18f28d568ee33e093..241e165a0e441ccb856431225ce1d6fd170a25f8 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -45,14 +45,10 @@ private:
 public:
     Slice_Op() = delete;
 
-    Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes, const std::vector<std::int64_t>& steps)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<SliceAttr::Starts>(starts),
-            attr<SliceAttr::Ends>(ends),
-            attr<SliceAttr::Axes>(axes),
-            attr<SliceAttr::Steps>(steps)))
-    {}
+    Slice_Op(const std::vector<std::int64_t>& starts,
+            const std::vector<std::int64_t>& ends,
+            const std::vector<std::int8_t>& axes,
+            const std::vector<std::int64_t>& steps);
 
 
     /**
@@ -60,24 +56,14 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Slice_Op(const Slice_Op &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Slice_Op, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Slice_Op(const Slice_Op &op);
 
 public:
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Slice_Op
      */
-    std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = true) override final;
@@ -104,13 +90,11 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
+std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
                                    const std::vector<std::int64_t>& ends = {},
                                    const std::vector<std::int8_t>& axes = {},
                                    const std::vector<std::int64_t>& steps = {},
-                                   const std::string &name = "") {
-    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
-}
+                                   const std::string &name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 0b7a8e57193439872c6fcc2699b9f5e55c643961..c221a67e31fc6de1bcb2c727854c8ebee2986ee4 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -42,34 +42,19 @@ private:
 public:
     Softmax_Op() = delete;
 
-    Softmax_Op(std::int32_t axis)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-                attr<SoftmaxAttr::Axis>(axis)))
-    {}
+    Softmax_Op(std::int32_t axis);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Softmax_Op(const Softmax_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    Softmax_Op(const Softmax_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Softmax_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Softmax_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -85,9 +70,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
-}
+std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 7bdec1579c8a8f46640de5caf42c01568d208059..661f9e32d47c7fb7e0c111805a50c6fcc131cffe 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -47,14 +47,7 @@ private:
 public:
     Split_Op() = delete;
 
-    Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<SplitAttr::Axis>(axis),
-            attr<SplitAttr::Split>(split)))
-    {
-        mImpl = std::make_shared<Split_OpImpl>(*this);
-    }
+    Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split);
 
 
     /**
@@ -62,23 +55,14 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Split_Op(const Split_Op &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Split_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Split_OpImpl>(*this);
-        }
-    }
+    Split_Op(const Split_Op &op);
+
 public:
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Split_Op
      */
-    std::shared_ptr<Operator> clone() const override { return std::make_shared<Split_Op>(*this); }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -103,12 +87,10 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Split(DimSize_t nbOutput,
+std::shared_ptr<Node> Split(DimSize_t nbOutput,
                                    std::int8_t axis = 0,
                                    const std::vector<DimSize_t>& split = {},
-                                   const std::string &name = "") {
-    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
-}
+                                   const std::string &name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 05b20286bc3f576d4e43fbece26ae270b3e583e6..ce4aaafc92d1f7d601946c02d4eb025eb735a3f9 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -14,8 +14,8 @@
 
 #include <memory>
 #include <vector>
+#include <string>
 
-#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -24,12 +24,9 @@
 namespace Aidge {
 
 class Sqrt_Op : public OperatorTensor,
-    public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
+                public Registrable<Sqrt_Op,
+                                std::string,
+                                std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     static const std::string Type;
 
@@ -39,23 +36,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Sqrt_Op(const Sqrt_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    Sqrt_Op(const Sqrt_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sqrt_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sqrt_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
@@ -67,9 +54,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sqrt(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
-}
+std::shared_ptr<Node> Sqrt(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SQRT_H_ */
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index ba5a021c30f13bbc2ae73c90078548c5b677a3a5..bb29ba67851bce8eed46ab1d4df3cf7a8ce91a1a 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -26,37 +26,23 @@ namespace Aidge {
 
 class Sub_Op : public OperatorTensor,
     public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
 public:
     static const std::string Type;
 
+public:
     Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Sub_Op(const Sub_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sub_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Sub_Op(const Sub_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sub_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sub_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -71,9 +57,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sub(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
-}
+std::shared_ptr<Node> Sub(const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index b5f183a90aeeb4ef424c318e8942a818b568b44a..fd05bf7c434ec2547995800f47380c53585ca6d7 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -28,29 +28,19 @@ class Tanh_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    Tanh_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Tanh_Op(const Tanh_Op& op)
-        : OperatorTensor(op)
-    {
-       if (op.mImpl){
-            SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Tanh_Op(const Tanh_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Tanh_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Tanh_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -63,9 +53,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Tanh(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
-}
+std::shared_ptr<Node> Tanh(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_TANH_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index efd9e1792d530f45754809913a7c648d82c7985e..375d6e098324516b750f8054f9214390373737e2 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -50,37 +50,19 @@ private:
 public:
     Transpose_Op() = delete;
 
-    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
-    {
-        mImpl = std::make_shared<TransposeImpl>(*this);
-    }
+    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Transpose_Op(const Transpose_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<TransposeImpl>(*this);
-        }
-    }
+    Transpose_Op(const Transpose_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Transpose_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Transpose_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -97,10 +79,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
-                                           const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
-}
+std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
+                                           const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 58cbcd2d756ad44ef2ec6a38d46909a114b187c2..3fda7c21405ef023f4324089e60be0330b5f34b6 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -57,42 +57,22 @@ private:
 public:
     Unfold_Op() = delete;
 
-    constexpr Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
-                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, {InputCategory::Data}, 1),
-          mAttributes(std::make_shared<Attributes_>(
-            attr<UnfoldAttr::StrideDims>(strideDims),
-            attr<UnfoldAttr::DilationDims>(dilationDims),
-            attr<UnfoldAttr::KernelDims>(kernelDims)))
-    {
-        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);  
-    }
+    Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
+            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Unfold_Op(const Unfold_Op<DIM> &op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);  
-        }
-    }
+    Unfold_Op(const Unfold_Op<DIM> &op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Unfold_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Unfold_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -112,14 +92,10 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
+std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
-    return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
-}
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
 
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> Unfold(
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index e33abcaebc02e8bcdd002efb7c2d8fe45d883906..c42b285dacb6c59c5fa30388c268f1680152a5e0 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -31,18 +31,14 @@ void constantFolding(std::shared_ptr<GraphView> graph);
  *
  * @param nodes Strict set of Node to merge.
  */
-//void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
-
-void fuseMulAdd(std::shared_ptr<MatchSolution> solution);
-
-void fuseMulAdd(std::shared_ptr<Node> matmul,std::shared_ptr<Node> add);
+void matMulToFC(std::shared_ptr<Node> matmul, std::shared_ptr<Node> add = nullptr);
 
 /**
  * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Node into a :cpp:function:`Aidge::FC` Node.
  *
  * @param graphView Graph view to use graph matching on, in order to apply transformations.
  */
-void fuseMulAdd(std::shared_ptr<GraphView> graphView);
+void matMulToFC(std::shared_ptr<GraphView> graphView);
 
 /**
  * @brief Remove a node type.
@@ -98,10 +94,6 @@ void removeFlatten(std::shared_ptr<GraphView> graphView);
  */
 void fuseBatchNorm(std::shared_ptr<Node> conv,std::shared_ptr<Node> batchnorm);
 
-
-
-void fuseBatchNorm(std::shared_ptr<MatchSolution> solution);
-
 /**
  * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
  * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp
index 94add56e8afdebb8e42f7ae49a32da2aeed9e9cb..2e397d1dbaa1cc8d8f586d15363cbd2245963152 100644
--- a/include/aidge/scheduler/MemoryManager.hpp
+++ b/include/aidge/scheduler/MemoryManager.hpp
@@ -19,6 +19,25 @@
 #include "aidge/graph/Node.hpp"
 
 namespace Aidge {
+/**
+ * @brief The MemoryManager can be used to generate an optimized static memory 
+ * layout for a computing graph in a global memory space.
+ * There are some assumptions:
+ * - A MemoryManager represents a single global memory space, filled with 
+ *   contiguous, non-overlapping MemorySpace chunks.
+ * - A MemorySpace contains one or multiple MemoryPlane, each MemoryPlane
+ *   corresponding to the allocation of a specific Tensor. When a Tensor can re-
+ *   use the memory of the preceding one (for in-place or partially in-place
+ *   operators), multiple overlapping MemoryPlane can be created in the same 
+ *   MemorySpace (remember, MemorySpace **cannot** be overlapping!).
+ * - A MemoryPlane is tailored for handling (N)HWC data with two properties:
+ *   - Possibility of wrapping: on the H axis (each W*C block is contiguous).
+ *   - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
+ * - All the sizes and offsets specified in a MemoryManager are expressed in
+ *   number of data elements, or **words**, meaning currently a uniform data 
+ *   precision is expected in a MemoryManager (for instance, if the precision is
+ *   16-bits, each data element will be 2 bytes, which will be the size of a word).
+ */
 class MemoryManager {
 public:
     typedef int Clock_T;
@@ -45,18 +64,45 @@ public:
             allocated(clock_),
             released(-1) {}
 
+        /// Offset of the MemorySpace in the MemoryManager global memory space (in words)
         unsigned int offset;
+        /// Size of the MemorySpace (in words)
         unsigned int size;
         std::set<std::shared_ptr<Node> > dependencies;
         Clock_T allocated;
         Clock_T released;
     };
 
-    // MemoryPlane belongs to a MemorySpace. Any number of potentially
-    // overlapping planes can be associated to a MemorySpace.
-    // MemoryPlane can be non-contiguous (in case of stride, or wrapping, when
-    // offset + size > memSpace.size).
-    // MemoryPlane cannot be re-arranged inside a MemorySpace.
+    /**
+     * @brief MemoryPlane belongs to a MemorySpace. Any number of potentially
+     * overlapping planes can be associated to a MemorySpace.
+     * MemoryPlane can be non-contiguous (in case of stride, or wrapping, when
+     * offset + size > memSpace.size).
+     * MemoryPlane cannot be re-arranged inside a MemorySpace.
+     * 
+     * A MemoryPlane is tailored for handling (N)HWC data with two properties:
+     * - Possibility of wrapping: on the H axis (each W*C block is contiguous).
+     * - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
+     * 
+     * Detail of (N)HWC data handling:
+     * - \p length is the size of contiguous and non-breakable memory line (W in HWC);
+     * - \p count is the number of memory lines of size \p length constituting a memory block (H in HWC);
+     * - \p stride is the number of channels, or memory blocks, *in total*, 
+     *   of \p count lines of size \p length (C in NHWC);
+     * - \p size is the number of channels, or memory blocks, *in this MemoryPlane*,
+     *   of \p count lines of size \p length.
+     *   In the case of concatenation, there can be multiple overlapping MemoryPlane
+     *   with different size, like NHWC = NHW(C1+C2):
+     *   - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
+     *   - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
+     *                    (with an additional relative offset of +C1)
+     * In this mode, wrapping can only occur on the H (\p count) axis. W*C chunks
+     * are guaranteed to be contiguous (\p length * \p stride).
+     * 
+     * By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
+     * there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
+     * In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
+     */
     struct MemoryPlane {
         MemoryPlane(std::shared_ptr<MemorySpace> memSpace_,
                     Clock_T clock_,
@@ -92,36 +138,91 @@ public:
                 <= memSpace->offset + memSpace->size);
         }
 
+        /**
+         * @brief Get the total size of the MemoryPlane, including the stride.
+         * 
+         * @return unsigned int Total size in words
+         */
         inline unsigned int getSize() const {
             return stride * length * count;
         }
 
+        /**
+         * @brief Get the useful size of the MemoryPlane, as if its memory blocks
+         * were contiguous, without stride.
+         * 
+         * @return unsigned int Useful size in words
+         */
         inline unsigned int getUsefulSize() const {
             return size * length * count;
         }
 
+        /**
+         * @brief Get the absolute offset of the beginning of the memory plane.
+         * 
+         * @return unsigned int Contiguous offset in words
+         */
         inline unsigned int getContiguousOffset() const {
             return memSpace->offset + offset;
         }
 
+        /**
+         * @brief Get the size of the contiguous part of the memory plane, from
+         * its beginning to the limit of the MemorySpace size.
+         * If the MemoryPlane fills the MemorySpace without wrapping, the contiguous
+         * size will be the same as the total size of the MemoryPlane.
+         * 
+         * @return unsigned int Contiguous size in words
+         */
         inline unsigned int getContiguousSize() const {
             return std::min(getSize(), getLimit());
         }
 
+        /**
+         * @brief Get the absolute offset of the wrapped part of the memory plane.
+         * Since the wrapped part of the memory plane begins at the beginning of
+         * the MemorySpace, the returned offset is always the same as the MemorySpace
+         * offset.
+         * 
+         * @return unsigned int Wrapped offset in words
+         */
         inline unsigned int getWrappedOffset() const {
             return memSpace->offset;
         }
 
+        /**
+         * @brief Get the size of the wrapped part of the memory plane, from
+         * the beginning of the MemorySpace to the total size of the MemoryPlane,
+         * including the stride.
+         * If the MemoryPlane fills the MemorySpace without wrapping, the wrapped
+         * size will be 0.
+         * 
+         * @return unsigned int Wrapped size in words
+         */
         inline unsigned int getWrappedSize() const {
             return getSize() - getContiguousSize();
         }
 
+        /**
+         * @brief Get the absolute offset after the end of the memory plane (if it
+         * is wrapped, the offset will correspond to the end of the wrapped part).
+         * The word at the final offset is not included in the MemoryPlane.
+         * 
+         * @return unsigned int Final offset in words
+         */
         inline unsigned int getFinalOffset() const {
             return (getWrappedSize() > 0)
                 ? getWrappedOffset() + getWrappedSize()
                 : getContiguousOffset() + getContiguousSize();
         }
 
+        /**
+         * @brief Get the absolute offset after the end of the contiguous part
+         * of the memory plane.
+         * The word at the upper offset is not included in the MemoryPlane.
+         * 
+         * @return unsigned int Upper offset in words
+         */
         inline unsigned int getUpperOffset() const {
             return (getContiguousOffset() + getContiguousSize());
         }
@@ -146,10 +247,29 @@ public:
 
         std::shared_ptr<MemorySpace> memSpace;
         Clock_T allocated;
+        /// Relative offset of the MemoryPlane in the MemorySpace (in words)
         unsigned int offset;
+        /// Number of channels, or memory blocks, *in this MemoryPlane*,
+        /// of \p count lines of size \p length.
+        /// In the case of concatenation, there can be multiple overlapping MemoryPlane
+        /// with different size, like NHWC = NHW(C1+C2):
+        /// - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
+        /// - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
+        ///                  (with an additional relative offset of +C1)
+        /// By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
+        /// there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
+        /// In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
         unsigned int size;
+        /// Number of channels, or memory blocks *in total*,
+        /// of \p count lines of size \p length (the C in NHWC).
+        /// There should be C blocks of H*W size.
         unsigned int stride;
+        /// Size of an elementary, contiguous and non-breakable, memory line 
+        /// (the W in NHWC), in words. A MemoryPlane wrapping cannot occur in
+        /// the middle of a memory line.
         unsigned int length;
+        /// Number of memory lines of size \p length constituting a memory block
+        /// (the H in NHWC). The size of a memory block is H*W.
         unsigned int count;
     };
 
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index f198e83fbacdc2cceee1c947d0c17244d4c9953e..6b2ace1c6aa013ae81e5144665e2edde830cdc54 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -74,7 +74,7 @@ public:
      * inducing no runtime overhead for Release.
     */
     template <typename... Args>
-    constexpr static void debug(Args&&... args) {
+    static void debug(Args&&... args) {
 #ifndef NDEBUG
         // only when compiled in Debug
         log(Debug, fmt::format(std::forward<Args>(args)...));
@@ -90,7 +90,7 @@ public:
      * performed nominally.
     */
     template <typename... Args>
-    constexpr static void info(Args&&... args) {
+    static void info(Args&&... args) {
         log(Info, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -101,7 +101,7 @@ public:
      * performed normally.
     */
     template <typename... Args>
-    constexpr static void notice(Args&&... args) {
+    static void notice(Args&&... args) {
         log(Notice, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -112,7 +112,7 @@ public:
      * still provide an exploitable result.
     */
     template <typename... Args>
-    constexpr static void warn(Args&&... args) {
+    static void warn(Args&&... args) {
         log(Warn, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -123,7 +123,7 @@ public:
      * further operations.
     */
     template <typename... Args>
-    constexpr static void error(Args&&... args) {
+    static void error(Args&&... args) {
         log(Error, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -134,14 +134,14 @@ public:
      * impossible.
     */
     template <typename... Args>
-    constexpr static void fatal(Args&&... args) {
+    static void fatal(Args&&... args) {
         log(Fatal, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
      * Set the minimum log level displayed in the console.
     */
-    constexpr static void setConsoleLevel(Level level) {
+    static void setConsoleLevel(Level level) {
         mConsoleLevel = level;
     }
 
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 47bb05ce7c435eda7e975a58544a416182c3853b..872c3f6b5a258292c41428852580210ab32decbf 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -75,11 +75,9 @@ struct Registrar {
         return (C::registry().find(key) != C::registry().cend());
     }
 
-    static auto create(const registrar_key& key){
-        const auto it = C::registry().find(key);
-        AIDGE_ASSERT(it != C::registry().cend(), "missing or invalid registrar key: {} for registrable object {}\nDid you include/import the corresponding module?\nIf so, it is possible that the object is not yet supported.", key, typeid(C).name());
-
-        return (*it).second;
+    static auto create(const registrar_key& key) {
+        AIDGE_ASSERT(exists(key), "missing or invalid registrar key: {} for registrable object {}\nDid you include/import the corresponding module?\nIf so, it is possible that the object is not yet supported.", key, typeid(C).name());
+        return C::registry()[key];
     }
     static std::vector<registrar_key> getKeys(){
         std::vector<registrar_key> keys;
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 3bb41b5bb0d9c2727d95a2656a1a2d5b96ff950b..18e75b7cef5a2e9e9568a900f826a31c87012318 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -158,7 +158,11 @@ public:
                 std::enable_if_t<(SIZE > 0), bool> = true>
     constexpr const std::type_info& getAttrType(std::size_t i) const {
         if (i == SIZE-1) {
-            return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
+            // Workaround for NVCC from 12.2.1 to 12.4.1
+            // error: no suitable constructor exists to convert from "const char *" to "std::type_info"
+            typename std::tuple_element<SIZE-1,std::tuple<T...>>::type dummy{};
+            return typeid(dummy);
+            //return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
         }
         else {
             return getAttrType<SIZE-1>(i);
diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp
index 2f652aff5008f8008952ffb1bb6fb1738021b436..c0b7218cdfd69d2ad0d8493a99833b80785c9d39 100644
--- a/python_binding/data/pybind_DataProvider.cpp
+++ b/python_binding/data/pybind_DataProvider.cpp
@@ -27,7 +27,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> DataProvider::next() {
 void init_DataProvider(py::module& m){
 
     py::class_<DataProvider, std::shared_ptr<DataProvider>>(m, "DataProvider")
-        .def(py::init<Database&, std::size_t, bool, bool>(), py::arg("database"), py::arg("batch_size"), py::arg("shuffle"), py::arg("drop_last"))
+        .def(py::init<Database&, std::size_t, std::string, bool, bool>(), py::arg("database"), py::arg("batch_size"), py::arg("backend"), py::arg("shuffle"), py::arg("drop_last"))
         .def("__iter__", &DataProvider::iter)
         .def("__next__", &DataProvider::next)
         .def("__len__", &DataProvider::getNbBatch);
diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..08dddfc8168bb77086a3dd72aca45b110a4cbce9
--- /dev/null
+++ b/python_binding/operator/pybind_And.cpp
@@ -0,0 +1,34 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_And(py::module& m) {
+    py::class_<And_Op, std::shared_ptr<And_Op>, OperatorTensor>(m, "AndOp", py::multiple_inheritance(),
+          R"mydelimiter( Initialize an And operator.)mydelimiter")
+    .def(py::init<>())
+    .def_static("get_inputs_name", &And_Op::getInputsName)
+    .def_static("get_outputs_name", &And_Op::getOutputsName);
+    declare_registrable<And_Op>(m, "AndOp");
+    m.def("And", &And, py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing an And operator.
+			:param name : name of the node.
+		)mydelimiter");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3de54afd7a669347cc2b272cff9b87cf152be09a
--- /dev/null
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ArgMax.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ArgMax(py::module &m) {
+  const std::string pyClassName("ArgMaxOp");
+  py::class_<ArgMax_Op, std::shared_ptr<ArgMax_Op>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize an ArgMax operator.
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axis: int
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+							the reduced dimensions are removed.
+			:type keepdims: bool
+			:param select_last_index: If True, selects the last index if there are multiple occurrences 
+									of the max value. If False (default), selects the first occurrence.
+			:type select_last_index: bool
+		)mydelimiter")
+    .def(py::init<std::int32_t, bool, bool>(), py::arg("axis"), py::arg("keep_dims"), py::arg("select_last_index"))
+    .def_static("get_inputs_name", &ArgMax_Op::getInputsName)
+    .def_static("get_outputs_name", &ArgMax_Op::getOutputsName)
+    ;
+  declare_registrable<ArgMax_Op>(m, pyClassName);
+
+  m.def("ArgMax", [](std::int32_t axis,
+                    bool keepDims,
+                    bool selectLastIndex,
+                    const std::string& name) {
+        return ArgMax(axis, keepDims, selectLastIndex, name);
+    }, py::arg("axis") = 0,
+       py::arg("keep_dims") = true,
+       py::arg("select_last_index") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing an ArgMax operator.
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axis: int
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+							the reduced dimensions are removed.
+			:type keepdims: bool
+			:param select_last_index: If True, selects the last index if there are multiple occurrences 
+									of the max value. If False (default), selects the first occurrence.
+			:type select_last_index: bool
+			:param name : name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..49e74f4cbab90f141af5e76df7fbdef6e3794146
--- /dev/null
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -0,0 +1,82 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <unordered_map>
+#include <vector>
+#include <array>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/GridSample.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
+
+
+static typename Aidge::GridSample_Op::Mode stringToInterpolationMode(const std::string& mode) {
+    static std::unordered_map<std::string, typename Aidge::GridSample_Op::Mode> map = {
+        {"linear", Aidge::GridSample_Op::Mode::Linear},
+        {"nearest", Aidge::GridSample_Op::Mode::Nearest},
+        {"cubic", Aidge::GridSample_Op::Mode::Cubic}
+    };
+    return map.at(mode);
+}
+
+static typename Aidge::GridSample_Op::PaddingMode stringToPaddingMode(const std::string& mode) {
+    static std::unordered_map<std::string, typename Aidge::GridSample_Op::PaddingMode> map = {
+        {"zeros", Aidge::GridSample_Op::PaddingMode::Zeros},
+        {"border", Aidge::GridSample_Op::PaddingMode::Border},
+        {"reflection", Aidge::GridSample_Op::PaddingMode::Reflection}
+    };
+    return map.at(mode);
+}
+
+namespace py = pybind11;
+namespace Aidge {
+
+void declare_GridSampleOp(py::module &m) {
+  const std::string pyClassName("GridSampleOp");
+  py::class_<GridSample_Op, std::shared_ptr<GridSample_Op>, OperatorTensor>(
+    m, pyClassName.c_str(),
+    py::multiple_inheritance())
+        .def(py::init([](const std::string& mode,
+                         const std::string& padding_mode,
+                         bool align_corners) {
+            return new GridSample_Op(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners);
+        }), py::arg("mode") = "linear",
+            py::arg("padding_mode") = "zeros",
+            py::arg("align_corners") = false)
+        .def_static("get_inputs_name", &GridSample_Op::getInputsName)
+        .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
+        ;
+
+  declare_registrable<GridSample_Op>(m, pyClassName);
+
+  m.def("GridSample", [](const std::string& mode,
+                        const std::string& padding_mode,
+                        bool align_corners,
+                        const std::string& name) {
+        return GridSample(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners, name);
+    }, py::arg("mode"),
+       py::arg("padding_mode"),
+       py::arg("align_corners"),
+       py::arg("name") = "");
+}
+
+
+void init_GridSample(py::module &m) {
+  declare_GridSampleOp(m);
+}
+
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 3023c077e2f3695902ca76dfa21831749f0ca82e..0fceed204152e214cc40495a0bafb9bfc000f0c0 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -27,22 +27,49 @@ namespace Aidge {
 void declare_ReduceMeanOp(py::module &m) {
   const std::string pyClassName("ReduceMeanOp");
   py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, OperatorTensor>(
-    m, pyClassName.c_str(), py::multiple_inheritance())
-    .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize a ReduceMean operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+							the reduced dimensions are removed.
+			:type keepdims: bool
+			:param noop_with_empty_axes: If True, the operator just copies the input, 
+      if False, the operator reduces all the dimensions.
+			:type noop_with_empty_axes: bool
+		)mydelimiter")
+    .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes") = std::vector<std::int32_t>(), py::arg("keep_dims") = true, py::arg("noop_with_empty_axes") = false)
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
 
   m.def("ReduceMean", [](const std::vector<int>& axes,
-                                                                DimSize_t keepDims,
-                                                                const std::string& name) {
+                          bool keepDims,
+                          bool noopWithEmptyAxes,
+                          const std::string& name) {
         // AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM);
 
-        return ReduceMean(axes, keepDims, name);
-    }, py::arg("axes"),
-       py::arg("keep_dims") = 1,
-       py::arg("name") = "");
+        return ReduceMean(axes, keepDims, noopWithEmptyAxes, name);
+    }, py::arg("axes") = std::vector<std::int32_t>(),
+       py::arg("keep_dims") = true,
+       py::arg("noop_with_empty_axes") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing a ReduceMean operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+							the reduced dimensions are removed.
+			:type keepdims: bool
+			:param noop_with_empty_axes: If True, the operator just copies the input, 
+      if False, the operator reduces all the dimensions.
+			:type noop_with_empty_axes: bool
+			:param name : name of the node.
+		)mydelimiter");
 }
 
 
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eaa57ef1c663a03cfd59ce02c13c3c7028b69e01
--- /dev/null
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -0,0 +1,73 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ReduceSum.hpp"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ReduceSum(py::module &m) {
+  const std::string pyClassName("ReduceSumOp");
+  py::class_<ReduceSum_Op, std::shared_ptr<ReduceSum_Op>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize a ReduceSum operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+							the reduced dimensions are removed.
+			:type keepdims: bool
+			:param noop_with_empty_axes: If True, the operator just copies the input, 
+      if False, the operator reduces all the dimensions.
+			:type noop_with_empty_axes: bool
+		)mydelimiter")
+    .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
+    .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
+    .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
+    ;
+  declare_registrable<ReduceSum_Op>(m, pyClassName);
+
+  m.def("ReduceSum", [](const std::vector<int>& axes,
+                        bool keepDims,
+                        bool noopWithEmptyAxes,
+                        const std::string& name) {
+        return ReduceSum(axes, keepDims, noopWithEmptyAxes, name);
+    }, py::arg("axes") = std::vector<std::int32_t>(),
+       py::arg("keep_dims") = true,
+       py::arg("noop_with_empty_axes") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing a ReduceSum operator.
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axes: List[int]
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+							the reduced dimensions are removed.
+			:type keepdims: bool
+			:param noop_with_empty_axes: If True, the operator just copies the input, 
+      if False, the operator reduces all the dimensions.
+			:type noop_with_empty_axes: bool
+			:param name : name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 9425eba06574c73339e8e4628ffded3449a8b4ab..9fae2cef29748482dfeabe173d946c6446a60a35 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -11,7 +11,6 @@
 
 #include <pybind11/pybind11.h>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index c5408a47666092a1df329bc560aceb4fdd7584a8..1f273baba76245616f7c96638c3941fc37bec0db 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -29,6 +29,8 @@ void init_OperatorTensor(py::module&);
 
 // operators
 void init_Add(py::module&);
+void init_And(py::module&);
+void init_ArgMax(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
 void init_Concat(py::module&);
@@ -40,6 +42,7 @@ void init_FC(py::module&);
 void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_GlobalAveragePooling(py::module&);
+void init_GridSample(py::module&);
 void init_Identity(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
@@ -51,6 +54,7 @@ void init_Pad(py::module&);
 void init_Pop(py::module&);
 void init_Pow(py::module&);
 void init_ReduceMean(py::module&);
+void init_ReduceSum(py::module&);
 void init_ReLU(py::module&);
 void init_Reshape(py::module&);
 void init_Resize(py::module&);
@@ -102,6 +106,8 @@ void init_Aidge(py::module& m) {
     init_Operator(m);
     init_OperatorTensor(m);
     init_Add(m);
+    init_And(m);
+    init_ArgMax(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
     init_Concat(m);
@@ -113,6 +119,7 @@ void init_Aidge(py::module& m) {
     init_Gather(m);
     init_GenericOperator(m);
     init_GlobalAveragePooling(m);
+    init_GridSample(m);
     init_LeakyReLU(m);
     init_MatMul(m);
     init_MaxPooling(m);
@@ -123,6 +130,7 @@ void init_Aidge(py::module& m) {
     init_Pop(m);
     init_Pow(m);
     init_ReduceMean(m);
+    init_ReduceSum(m);
     init_ReLU(m);
     init_Reshape(m);
     init_Resize(m);
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index c0392287a756b6272a59275b6d12b3a70c1c9420..b68dfd035921a1dce4d12b9071a8df194e2ffdd5 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -21,18 +21,18 @@
 namespace py = pybind11;
 
 namespace Aidge {
-void init_Recipes(py::module &m) 
+void init_Recipes(py::module &m)
 {
 
 
-  m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
+  m.def("matmul_to_fc", static_cast<void(*)(std::shared_ptr<GraphView>)>(matMulToFC), py::arg("graph_view"), R"mydelimiter(
     Recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
     :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
-  // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+  // m.def("matmul_to_fc", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(matMulToFC), py::arg("nodes"), R"mydelimiter(
   //   recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
   //   :param nodes: The MatMul and Add nodes to fuse.
@@ -71,9 +71,10 @@ void init_Recipes(py::module &m)
     )mydelimiter");
 
   m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
-    Recipe to remove a flatten operator.
+    Recipe to remove a Flatten operator if it is followed by a FC or a MatMul.
+    The recipe can remove multiple Flatten operator if they are one after the other.
 
-    :param graph_view: Graph view on which we want to apply the recipe
+    :param graph_view: Graph view on which we want to apply the recipe.
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
@@ -84,13 +85,6 @@ void init_Recipes(py::module &m)
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
-  // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
-  //   Recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
-
-  //   :param nodes: The MatMul and Add nodes to fuse.
-  //   :type nodes: list of :py:class:`aidge_core.Node`
-  //   )mydelimiter");
-
   m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
     Recipe to remove a flatten operator.
 
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
index fc6b842edef17c80a4ef80667fc814bf85df25a4..7f4eb71aa1f1e05c42aef8090988d0ea05aa6cb2 100644
--- a/src/data/DataProvider.cpp
+++ b/src/data/DataProvider.cpp
@@ -23,9 +23,10 @@
 #include "aidge/utils/Random.hpp"
 
 
-Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize, const bool shuffle, const bool dropLast)
+Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize, const std::string& backend, const bool shuffle, const bool dropLast)
     : mDatabase(database),
       mBatchSize(batchSize),
+      mBackend(backend),
       mShuffle(shuffle),
       mDropLast(dropLast),
       mNumberModality(database.getItem(0).size()),
@@ -63,7 +64,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
         dataBatchDims[i].insert(dataBatchDims[i].begin(), current_batch_size);
         auto batchData = std::make_shared<Tensor>();
         batchData->resize(dataBatchDims[i]);
-        batchData->setBackend("cpu");
+        batchData->setBackend(mBackend);
         batchData->setDataType(mDataTypes[i]);
         batchTensors.push_back(batchData);
     }
@@ -78,6 +79,8 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
 
         // Browse each modularity in the database item
         for (std::size_t j = 0; j < mNumberModality; ++j) {
+
+            dataItem[j]->setBackend(mBackend);
             auto dataSample = dataItem[j];
 
             // Assert tensor sizes
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index d1bf32594c9a79b6519613327c87370facc138ad..abfc91c6cdf9fd4f6eb46100074b22083514d82e 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -16,24 +16,20 @@
 
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Abs.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/Div.hpp"
 #include "aidge/operator/Mul.hpp"
+#include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
 
-/**
- * @brief Element-wise addition operation for two ``Tensor``s.
- * @note ``Tensor``s should be stored on the same backend.
- * @todo If input ``Tensor``s have a different dataType, the output should
- * have the dataType of the ``Tensor`` with the highest precision.
- *
- * @param other
- * @return Tensor
- */
+Aidge::Tensor::~Tensor() noexcept = default;
+
+
 Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
@@ -50,15 +46,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
     return add_.getOutput(0)->clone();
 }
 
-/**
- * @brief Element-wise substraction operation for two ``Tensor``s.
- * @note ``Tensor``s should be stored on the same backend.
- * @todo If input ``Tensor``s have a different dataType, the output should
- * have the dataType of the ``Tensor`` with the highest precision.
- *
- * @param other
- * @return Tensor
- */
+
 Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
@@ -75,15 +63,7 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
     return sub_.getOutput(0)->clone();
 }
 
-/**
- * @brief Element-wise multiplication operation for two ``Tensor``s.
- * @note ``Tensor``s should be stored on the same backend.
- * @todo If input ``Tensor``s have a different dataType, the output should
- * have the dataType of the ``Tensor`` with the highest precision.
- *
- * @param other
- * @return Tensor
- */
+
 Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
@@ -100,6 +80,7 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
     return mul_.getOutput(0)->clone();
 }
 
+
 Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
@@ -127,6 +108,32 @@ Aidge::Tensor Aidge::Tensor::sqrt() const {
     return sqrt_.getOutput(0)->clone();
 }
 
+Aidge::Tensor Aidge::Tensor::abs() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    auto abs_ = Abs_Op();
+    abs_.associateInput(0, std::make_shared<Tensor>(*this));
+    abs_.setDataType(dataType());
+    abs_.setDataFormat(dataFormat());
+    abs_.setBackend(mImpl->backend());
+    abs_.forward();
+    return abs_.getOutput(0)->clone();
+}
+
+Aidge::Tensor Aidge::Tensor::mean() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    // TODO: should be the default behavior of ReduceMean_Op
+    // No need to specify the list of all axes!
+    std::vector<std::int32_t> axes(nbDims());
+    std::iota(std::begin(axes), std::end(axes), 0);
+    auto mean_ = ReduceMean_Op(axes, false, false);
+    mean_.associateInput(0, std::make_shared<Tensor>(*this));
+    mean_.setDataType(dataType());
+    mean_.setDataFormat(dataFormat());
+    mean_.setBackend(mImpl->backend());
+    mean_.forward();
+    return mean_.getOutput(0)->clone();
+}
+
 Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     if (this == &other) {
         return *this;
@@ -146,7 +153,23 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     return *this;
 }
 
-Aidge::Tensor::~Tensor() noexcept = default;
+
+void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t device, bool copyFrom) {
+    if (mImpl) {
+        if (mImpl->device() != std::make_pair(name, device)) {
+            // Backend change: create new impl, copy from old to new and replace
+            // impl
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
+            if (copyFrom) {
+                newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
+            }
+            setImpl(newImpl);
+        }
+    }
+    else {
+        mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
+    }
+}
 
 void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
                            std::vector<Aidge::DimSize_t> strides) {
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 4ec3334454034f20badb246b7030594bee0c0e48..c9f3cd14db84dab0faab065278664e68fb577db6 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -225,6 +225,18 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     fmt::print(fp.get(), "\n");
 }
 
+void Aidge::GraphView::setNodesName() const {
+    std::map<std::string, std::int32_t> typeIds;
+    for (const auto& nodePtr: getNodes()) {
+        const std::string& t = nodePtr->getOperator()->type();
+        if (typeIds.find(t) == typeIds.cend()) {
+            typeIds.emplace(t, 0);
+        }
+        const std::string nodeName = name() + std::string("_") + t + std::string("#") + std::to_string(typeIds[t]++);
+        nodePtr->setName(nodeName);
+    }
+}
+
 void Aidge::GraphView::logOutputs(const std::string& dirName) const {
   if (!Aidge::createDirectories(dirName)){
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Failed to create directory: {}.", dirName);
@@ -306,6 +318,7 @@ void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOI
 }
 
 void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs) {
+  // Note: one can specify any node as graph output!
   size_t nbOutputs = 0;
   std::vector<std::pair<NodePtr, IOIndex_t>> ignoredOutputs(mOutputNodes);
   for (auto output : outputs) {
@@ -314,14 +327,13 @@ void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IO
     // it into account.
     if (output.first != nullptr) {
       auto it = std::find(ignoredOutputs.begin(), ignoredOutputs.end(), output);
-      AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output: {} (of type {})", output.first->name(), output.first->type());
-      ignoredOutputs.erase(it);
+      if (it != ignoredOutputs.end()) {
+        ignoredOutputs.erase(it);
+      }
       ++nbOutputs;
     }
   }
 
-  AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many specified number of outputs: {} specified vs {} available", nbOutputs, mOutputNodes.size());
-
   mOutputNodes = outputs;
   mOutputNodes.insert(mOutputNodes.end(), ignoredOutputs.begin(), ignoredOutputs.end());
 }
@@ -415,16 +427,36 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
     // Link every tensor to the right pointer
     // following parent - children informations
     if (!dims.empty()){
-      AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of given dimensions ({}) and graph inputs ({})", dims.size(), mInputNodes.size());
-      for (std::size_t i = 0; i < dims.size(); ++i){
+      Log::debug("forwardDims(): setting graph input dims ({} dims provided).", dims.size());
+
+      std::size_t i = 0;
+      for (auto& input : mInputNodes) {
         const auto& currentTensorPtr =
-            std::dynamic_pointer_cast<OperatorTensor>(mInputNodes[i].first->getOperator())->getInput(mInputNodes[i].second);
-        if (currentTensorPtr) { // tensor detected
-            AIDGE_ASSERT(currentTensorPtr->dims() == dims[i], "Tensor of unexpected size provided.")
-        } else {
-            auto tensor = std::make_shared<Tensor>(dims[i]);
-            mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+            std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator())->getInput(input.second);
+        if (i < dims.size() && !dims[i].empty()) {
+          if (currentTensorPtr) { // tensor detected
+              AIDGE_ASSERT(currentTensorPtr->dims() == dims[i],
+                "forwardDims(): mismatch between existing and provided size for graph input#{} (existing size: {}, provided size: {})",
+                i, currentTensorPtr->dims(), dims[i])
+          } else {
+              auto tensor = std::make_shared<Tensor>(dims[i]);
+              input.first->getOperator()->setInput(input.second, tensor);
+          }
+        }
+        else {
+          const bool optional = (input.first->inputCategory(input.second) == InputCategory::OptionalData
+            || input.first->inputCategory(input.second) == InputCategory::OptionalParam);
+
+          if (currentTensorPtr) {
+            Log::debug("forwardDims(): existing dims are {} for graph input#{} for input#{} of node {} (of type {})",
+              currentTensorPtr->dims(), i, input.second, input.first->name(), input.first->type());
+          }
+          else if (!optional) {
+            Log::warn("forwardDims(): did not specify dims for mandatory graph input#{} for input#{} of node {} (of type {})",
+              i, input.second, input.first->name(), input.first->type());
+          }
         }
+        ++i;
       }
     }
 
diff --git a/src/graph/Matching.cpp b/src/graph/Matching.cpp
index a840b6ab552d71990b796d741d3ca56b07c7c0be..4a62019a7aa044ebcf2089d91f3ba097d85218e7 100644
--- a/src/graph/Matching.cpp
+++ b/src/graph/Matching.cpp
@@ -2,6 +2,33 @@
 
 #include <fmt/color.h>
 
+Aidge::SinglePassGraphMatching::Context::Context() = default;
+Aidge::SinglePassGraphMatching::Context::Context(const Context& other) = default;
+Aidge::SinglePassGraphMatching::Context& Aidge::SinglePassGraphMatching::Context::operator=(const Context& other) = default;
+Aidge::SinglePassGraphMatching::Context::~Context() = default;
+
+////////////////////////////////////////////////////////////
+
+Aidge::SinglePassGraphMatching::MatchingResult::MatchingResult() : graph(std::make_shared<GraphView>()), startNode(nullptr) {}
+Aidge::SinglePassGraphMatching::MatchingResult::MatchingResult(const Aidge::SinglePassGraphMatching::MatchingResult& other) {
+    graph = std::make_shared<GraphView>(*(other.graph.get()));
+    anchors = other.anchors;
+    startNode = other.startNode;
+}
+Aidge::SinglePassGraphMatching::MatchingResult& Aidge::SinglePassGraphMatching::MatchingResult::operator=(const Aidge::SinglePassGraphMatching::MatchingResult& other) {
+    graph = std::make_shared<GraphView>(*(other.graph.get()));
+    anchors = other.anchors;
+    startNode = other.startNode;
+    return *this;
+}
+Aidge::SinglePassGraphMatching::MatchingResult::~MatchingResult() noexcept = default;
+
+//////////////////////////////////////////////////////////
+
+Aidge::SinglePassGraphMatching::SinglePassGraphMatching(const Aidge::SinglePassGraphMatching& other) = default;
+Aidge::SinglePassGraphMatching& Aidge::SinglePassGraphMatching::operator=(const Aidge::SinglePassGraphMatching& other) = default;
+Aidge::SinglePassGraphMatching::~SinglePassGraphMatching() noexcept = default;
+
 std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphMatching::match(const std::string& query, bool disjoint) {
     Context ctx;
     ctx.query = query;
@@ -29,6 +56,31 @@ std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphM
     return matches;
 }
 
+Aidge::SinglePassGraphMatching::MatchingResult Aidge::SinglePassGraphMatching::matchFrom(NodePtr startNode, const std::string& query) {
+    Context ctx;
+    ctx.query = query;
+    ctx.startNode = startNode;
+    std::set<MatchingResult> matches;
+
+    while (matchSequence(ctx, matches) || matchNodeOrBlock(ctx, matches)) {
+        removeWhiteSpace(ctx.query);
+        if (!ctx.query.empty() && ctx.query[0] == ';') {
+            ctx.query.erase(0, 1);
+        }
+        else {
+            break;
+        }
+    }
+
+    removeWhiteSpace(ctx.query);
+    if (!ctx.query.empty()) {
+        Log::warn("Syntax error, unable to parse remaining query: {}", ctx.query);
+    }
+
+    AIDGE_INTERNAL_ASSERT(matches.size() <= 1);
+    return (!matches.empty()) ? *matches.begin() : MatchingResult();
+}
+
 std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphMatching::filterLonguestDisjoint(const std::set<MatchingResult>& matches) {
     // Sort matches by highest number of nodes first, thanks to the CompareMatchingResultSize function
     std::set<MatchingResult, CompareMatchingResultSize> sortedMatches(matches.begin(), matches.end());
@@ -104,7 +156,7 @@ bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<Mat
         newCtx.query.erase(0, 1);
 
         removeWhiteSpace(newCtx.query);
-        const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return !isdigit(c); });
         if (endQuantity != newCtx.query.begin()) {
             matchQuantity = std::stoi(newCtx.query.substr(0, endQuantity - newCtx.query.begin()));
@@ -191,8 +243,8 @@ bool Aidge::SinglePassGraphMatching::matchBlock(Context& ctx, std::set<MatchingR
     // SEQ | PAR | BLOCK | ALT | NODE
     if (!matchSequence(newCtx, newMatches)
         && !matchParallel(newCtx, newMatches)
-        && !matchBlock(newCtx, newMatches)
         && !matchAlternative(newCtx, newMatches)
+        && !matchBlock(newCtx, newMatches)
         && !matchNode(newCtx, newMatches))
     {
         Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
@@ -341,6 +393,9 @@ bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<Mat
         return false;
     }
     newCtx.query = altCtx.query;
+    newCtx.anchors.insert(altCtx.anchors.begin(), altCtx.anchors.end());
+    bool firstSequence = altCtx.firstSequence;
+    bool firstNode = altCtx.firstNode;
     newMatches.insert(altMatches.begin(), altMatches.end());
 
     bool found = false;
@@ -364,6 +419,11 @@ bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<Mat
             return false;
         }
         newCtx.query = altCtx.query;
+        newCtx.anchors.insert(altCtx.anchors.begin(), altCtx.anchors.end());
+        AIDGE_ASSERT(firstSequence == altCtx.firstSequence,
+            "Ill-formed query; inconsistency between alternatives regarding first sequence in query at: {}", ctx.query);
+        AIDGE_ASSERT(firstNode == altCtx.firstNode,
+            "Ill-formed query; inconsistency between alternatives regarding first node in query at: {}", ctx.query);
         newMatches.insert(altMatches.begin(), altMatches.end());
     }
 
@@ -372,6 +432,9 @@ bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<Mat
         return false;
     }
 
+    newCtx.firstSequence = firstSequence;
+    newCtx.firstNode = firstNode;
+
     --newCtx.depth;
     ctx = newCtx;
     matches = newMatches;
@@ -401,7 +464,7 @@ bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingRe
     // optional first IO_INDEX
     int firstIdx = 0;
     bool foundFirst = false;
-    const auto endOutputIdx = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+    const auto endOutputIdx = std::find_if(newCtx.query.begin(), newCtx.query.end(),
         [](char c) { return !isdigit(c); });
     if (endOutputIdx != newCtx.query.begin()) {
         firstIdx = std::stoi(newCtx.query.substr(0, endOutputIdx - newCtx.query.begin()));
@@ -421,7 +484,7 @@ bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingRe
         auto query = newCtx.query;
         query.erase(0, 1); // drop '-'
 
-        const auto endInputIdx = std::find_if(query.begin(), query.end(), 
+        const auto endInputIdx = std::find_if(query.begin(), query.end(),
             [](char c) { return !isdigit(c); });
         if (endInputIdx != query.begin()) {
             secondIdx = std::stoi(query.substr(0, endInputIdx - query.begin()));
@@ -486,7 +549,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     Log::debug("{}node", std::string(2*newCtx.depth, ' '));
     auto newMatches = matches;
 
-    // (TYPE | '.')
+    // (TYPE | '.' | '$')
     removeWhiteSpace(newCtx.query);
     if (newCtx.query.empty()) {
         Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
@@ -494,13 +557,19 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     }
 
     std::string type;
+    bool unconnected = false;
     if (newCtx.query[0] == '.') {
         // '.'
         newCtx.query.erase(0, 1); // drop '.'
     }
+    else if (newCtx.query[0] == '$') {
+        // '$'
+        newCtx.query.erase(0, 1); // drop '$'
+        unconnected = true;
+    }
     else {
         // TYPE
-        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return (!isalnum(c) && c != '_'); });
 
         if (endIdentifier == newCtx.query.begin()) {
@@ -515,11 +584,14 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     // ('#' ANCHOR)?
     std::string anchor = "";
     if (!newCtx.query.empty() && newCtx.query[0] == '#') {
+        AIDGE_ASSERT(!unconnected,
+            "Ill-formed query; an anchor cannot be specified for end of graph ($) in query at: {}", ctx.query);
+
         // '#'
         newCtx.query.erase(0, 1); // drop '#'
 
         // ANCHOR
-        const auto endAnchor = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endAnchor = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return (!isalnum(c) && c != '_'); });
         anchor = "#" + newCtx.query.substr(0, endAnchor - newCtx.query.begin());
         newCtx.query = newCtx.query.substr(endAnchor - newCtx.query.begin());
@@ -528,11 +600,14 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     // ('[' LAMBDA ']')?
     std::string lambda = "";
     if (!newCtx.query.empty() && newCtx.query[0] == '[') {
+        AIDGE_ASSERT(!unconnected,
+            "Ill-formed query; a lambda cannot be specified for end of graph ($) in query at: {}", ctx.query);
+
         // '['
         newCtx.query.erase(0, 1);
 
         // LAMBDA
-        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(), 
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return (!isalnum(c) && c != '_'); });
 
         if (endIdentifier == newCtx.query.begin()) {
@@ -554,9 +629,72 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     }
 
     // Parsing is done, try to match the node
-    if (newCtx.firstSequence && newCtx.firstNode) {
+    if (unconnected) {
+        for (auto it = newMatches.begin(); it != newMatches.end(); ) {
+            bool found = false;
+
+            if (newCtx.lookForChild) {
+                const auto outputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbOutputs())
+                        ? std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>(1, std::vector<std::pair<NodePtr, IOIndex_t>>(it->startNode->output(newCtx.edgeLeftIdx)))
+                        : std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>())
+                    : it->startNode->outputs();
+
+                for (const auto& output : outputs) {
+                    for (const auto& node : output) {
+                        if (!node.first) {
+                            continue;
+                        }
+
+                        if (newCtx.edgeRightIdx == gk_IODefaultIndex || node.second == newCtx.edgeRightIdx) {
+                            if (mGraph->inView(node.first) && !it->graph->inView(node.first)) {
+                                found = true;
+                                break;
+                            }
+                        }
+                    }
+
+                    if (found) {
+                        break;
+                    }
+                }
+            }
+            else {
+                const auto inputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbInputs())
+                        ? std::vector<std::pair<NodePtr, IOIndex_t>>(1, it->startNode->input(newCtx.edgeLeftIdx))
+                        : std::vector<std::pair<NodePtr, IOIndex_t>>())
+                    : it->startNode->inputs();
+
+                for (const auto& input : inputs) {
+                    if (!input.first) {
+                        continue;
+                    }
+
+                    if (newCtx.edgeRightIdx == gk_IODefaultIndex || input.second == newCtx.edgeRightIdx) {
+                        if (mGraph->inView(input.first) && !it->graph->inView(input.first)) {
+                            found = true;
+                            break;
+                        }
+                    }
+                }
+            }
+
+            if (found) {
+                it = newMatches.erase(it);
+            }
+            else {
+                ++it;
+            }
+        }
+
+        Log::debug("{}node $, found: {}", std::string(2*newCtx.depth + 2, ' '), newMatches.size());
+    }
+    else if (newCtx.firstSequence && newCtx.firstNode) {
         // First node of first sequence = root node
-        for (auto node : mGraph->getNodes()) {
+        const auto nodes = (newCtx.startNode) ? std::set<NodePtr>{newCtx.startNode} : mGraph->getNodes();
+
+        for (auto node : nodes) {
             if ((type.empty() || node->type() == type)
                 && (lambda.empty() || mLambda.at(lambda)(node)))
             {
@@ -600,7 +738,9 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
 
             if (newCtx.lookForChild) {
                 const auto outputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
-                    ? std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>(1, std::vector<std::pair<NodePtr, IOIndex_t>>(it->startNode->output(newCtx.edgeLeftIdx)))
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbOutputs())
+                        ? std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>(1, std::vector<std::pair<NodePtr, IOIndex_t>>(it->startNode->output(newCtx.edgeLeftIdx)))
+                        : std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>())
                     : it->startNode->outputs();
 
                 for (const auto& output : outputs) {
@@ -609,6 +749,10 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
                     }
 
                     for (const auto& node : output) {
+                        if (!node.first) {
+                            continue;
+                        }
+
                         if ((type.empty() || node.first->type() == type)
                             && (lambda.empty() || mLambda.at(lambda)(node.first))
                             && (newCtx.edgeRightIdx == gk_IODefaultIndex || node.second == newCtx.edgeRightIdx))
@@ -637,10 +781,16 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
             }
             else {
                 const auto inputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
-                    ? std::vector<std::pair<NodePtr, IOIndex_t>>(1, it->startNode->input(newCtx.edgeLeftIdx))
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbInputs())
+                        ? std::vector<std::pair<NodePtr, IOIndex_t>>(1, it->startNode->input(newCtx.edgeLeftIdx))
+                        : std::vector<std::pair<NodePtr, IOIndex_t>>())
                     : it->startNode->inputs();
 
                 for (const auto& input : inputs) {
+                    if (!input.first) {
+                        continue;
+                    }
+
                     if ((type.empty() || input.first->type() == type)
                         && (lambda.empty() || mLambda.at(lambda)(input.first))
                         && (newCtx.edgeRightIdx == gk_IODefaultIndex || input.second == newCtx.edgeRightIdx))
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 7aeeb8eee2cc6493f86461779342083f3f23d6e7..382052535cc6b5cd8089f720b8fa9f8d3a0ebce1 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -29,8 +29,13 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
       mIdInChildren(std::vector<std::vector<IOIndex_t>>(static_cast<std::size_t>(op->nbOutputs()),
                                                         std::vector<IOIndex_t>())),
       mIdOutParents(
-              std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex)) {
+              std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex))
+{
     // ctor
+    if (op) {
+        mForward.push_back([this](){ this->mOperator->forward(); return true; });
+        mBackward.push_back([this](){ this->mOperator->backward(); return true; });
+    }
 }
 
 ///////////////////////////////////////////////////////
@@ -82,13 +87,27 @@ std::string Aidge::Node::createUniqueName(std::string name){
 ///////////////////////////////////////////////////////
 
 void Aidge::Node::forward() {
-    assert((mOperator != nullptr) && "No Operator interface provided, can't run forward().\n");
-    mOperator->forward();
+    for (auto it = mForward.begin(); it != mForward.end(); ) {
+        const auto keep = (*it)();
+        if (!keep) {
+            it = mForward.erase(it);
+        }
+        else {
+            ++it;
+        }
+    }
 }
 
 void Aidge::Node::backward() {
-    assert((mOperator != nullptr) && "No Operator interface provided, can't run backward().\n");
-    mOperator->backward();
+    for (auto it = mBackward.begin(); it != mBackward.end(); ) {
+        const auto keep = (*it)();
+        if (!keep) {
+            it = mBackward.erase(it);
+        }
+        else {
+            ++it;
+        }
+    }
 }
 
 ///////////////////////////////////////////////////////
@@ -196,7 +215,7 @@ void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId)
         auto originalParent = input(inId);
         // remove original parent reference to child
         // find the output ID for original Parent
-        // find first occurence of child in the output's children
+        // find first occurrence of child in the output's children
         originalParent.first->removeChild(shared_from_this(), originalParent.second);
     }
     mIdOutParents[inId] = newNodeoutId;
diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp
index 3b6ad475fffcd49b18e9a5f4f67c6fd804c5e187..84d9826244bd3bc4368af2233be0068571a12e9e 100644
--- a/src/graph/OpArgs.cpp
+++ b/src/graph/OpArgs.cpp
@@ -15,11 +15,15 @@
 #include <string>
 #include <memory>
 
+Aidge::OpArgs::OpArgs(const OpArgs&) = default;
+Aidge::OpArgs& Aidge::OpArgs::operator=(const OpArgs&) = default;
+Aidge::OpArgs::~OpArgs() noexcept = default;
+
 std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs, std::string name) {
     std::shared_ptr<GraphView> gv = std::make_shared<GraphView>(name);
     for (const OpArgs& elt : inputs) {
         if(elt.node() != nullptr) {
-            // Connect the first output (ordered) of each output node (ordered) 
+            // Connect the first output (ordered) of each output node (ordered)
             // to the next available input of the input node.
             AIDGE_ASSERT(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size(),
                 "Sequential(): not enough free data inputs ({}) for input node {} (of type {}) to connect to all previous output nodes ({})",
@@ -34,7 +38,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs,
             gv->add(elt.node());
         }
         else {
-            // For each input node, connect the first output (ordered) of each 
+            // For each input node, connect the first output (ordered) of each
             // output node (ordered) to the next available input
             std::set<NodePtr> connectedInputs;
             for (const auto& node_in : elt.view()->getOrderedInputs()) {
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a8ee706f6c993362e2569b6be86f5e17545ae679
--- /dev/null
+++ b/src/operator/Abs.cpp
@@ -0,0 +1,25 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Abs.hpp"
+
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Abs_Op::Type = "Abs";
+
+void Aidge::Abs_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Abs_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 57ece07152613b831675cdecd6526d4ab26af5cb..f9dc3335a3b62e87edf33d25c5a516a63c4129a0 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -22,6 +22,14 @@
 
 const std::string Aidge::Add_Op::Type = "Add";
 
+Aidge::Add_Op::Add_Op(const IOIndex_t nbIn)
+    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
+{
+    if (nbIn == 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
+    }
+}
+
 Aidge::Add_Op::Add_Op(const Add_Op& op)
     : OperatorTensor(op)
 {
@@ -32,6 +40,10 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::Add_Op::clone() const {
+    return std::make_shared<Add_Op>(*this);
+}
+
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
@@ -71,4 +83,8 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(Add_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
 }
\ No newline at end of file
diff --git a/src/operator/And.cpp b/src/operator/And.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..43aeebe24ef0e6d0e0b820d1459f25d64e7054a7
--- /dev/null
+++ b/src/operator/And.cpp
@@ -0,0 +1,58 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::And_Op::Type = "And";
+
+bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for And Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::And_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(And_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..58ade4754a013a65af80e5b754d0d44ad3b18189
--- /dev/null
+++ b/src/operator/ArgMax.cpp
@@ -0,0 +1,53 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ArgMax.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ArgMax_Op::Type = "ArgMax";
+
+bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // make Axis attribute positive
+        std::int32_t axis = mAttributes->template getAttr<ArgMaxAttr::Axis>();
+        axis = axis >= 0 ? axis: axis+static_cast<std::int32_t>(getInput(0)->nbDims());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        if (mAttributes->template getAttr<ArgMaxAttr::KeepDims>()) {
+            outDims[axis] = 1;
+        }
+        else {
+            outDims.erase(outDims.begin() + static_cast<std::size_t>(axis));
+        }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
+    }
+    return false;
+}
+
+void Aidge::ArgMax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ArgMax_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 53ffb93269e79c0ba940f1fb0d3d94cb494ad8ce..296ae789197f88c655c0097d94b370ef91f0189f 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -26,6 +26,7 @@
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
 
+
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     : OperatorTensor(op),
@@ -38,6 +39,11 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const {
+    return std::make_shared<AvgPooling_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -110,4 +116,17 @@ void Aidge::AvgPooling_Op<DIM>::setBackend(const std::string &name, Aidge::Devic
 template class Aidge::AvgPooling_Op<1>;
 template class Aidge::AvgPooling_Op<2>;
 template class Aidge::AvgPooling_Op<3>;
-template class Aidge::AvgPooling_Op<4>;
\ No newline at end of file
+template class Aidge::AvgPooling_Op<4>;
+
+////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims) {
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", AvgPooling_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
+}
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&);
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&);
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&);
\ No newline at end of file
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 98e5c2da20fc35e18d4fd69a79cf1d87ec9d60ca..a81cfc132773134889a5164762091229759b4f38 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -38,6 +38,11 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::BatchNorm_Op<DIM>::clone() const {
+    return std::make_shared<BatchNorm_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -95,7 +100,7 @@ template class Aidge::BatchNorm_Op<3>;
 template class Aidge::BatchNorm_Op<4>;
 
 template <Aidge::DimSize_t DIM>
-inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const DimSize_t nbFeatures,
+inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFeatures,
                                        const float epsilon,
                                        const float momentum,
                                        const std::string& name) {
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 8df153a67d2214e4435d9fa0aac6e74d53e11b12..b6164a77cb47e0b9127fa4b02255ed0991805fe7 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -46,3 +46,7 @@ void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::shared_ptr<Aidge::Node> Aidge::Cast(const Aidge::DataType targetType, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
+}
\ No newline at end of file
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 4649a954a095d239dbe7de7bcbebf1025a3b22c6..c78afa8665322a9cbca42a3326d527c1ebd949d4 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -18,6 +18,35 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+const std::string Aidge::Concat_Op::Type = "Concat";
+
+Aidge::Concat_Op::Concat_Op(const Aidge::IOIndex_t nbIn, const std::int32_t axis)
+    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ConcatAttr::Axis>(axis)))
+{
+    if (nbIn == 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Concat operator should have at least one input.");
+    }
+    mImpl = std::make_shared<Concat_OpImpl>(*this);
+}
+
+Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Concat_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Concat_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const {
+    return std::make_shared<Concat_Op>(*this);
+}
+
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
     const DimSize_t axis = op.axis();
@@ -56,7 +85,6 @@ void Aidge::Concat_OpImpl::forward() {
     }
 }
 
-const std::string Aidge::Concat_Op::Type = "Concat";
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     if (!inputsAssociated()) {
@@ -105,3 +133,9 @@ void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Concat(const Aidge::IOIndex_t nbIn, const std::int32_t axis, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
+}
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index a33af78779971e77da4f4e910b89b9263a1af5d6..92f4ec593a1dcb26a5a16ffb527667e39502e547 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -158,4 +158,27 @@ void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
 }
 
 template class Aidge::Conv_Op<1>;
-template class Aidge::Conv_Op<2>;
\ No newline at end of file
+template class Aidge::Conv_Op<2>;
+
+/////////////////////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Conv(Aidge::DimSize_t inChannels,
+                                  Aidge::DimSize_t outChannels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims,
+                                  bool noBias) {
+    // FIXME: properly handle default w&b initialization in all cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
+    return conv;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Conv<1>(Aidge::DimSize_t, Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Conv<2>(Aidge::DimSize_t, Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 342fd86195d5c2e85a63d990c4ebbb75e7f50a6b..9e95e78ea6867c41a332916b352f091ad528894a 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -157,4 +157,26 @@ void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::De
 }
 
 template class Aidge::ConvDepthWise_Op<1>;
-template class Aidge::ConvDepthWise_Op<2>;
\ No newline at end of file
+template class Aidge::ConvDepthWise_Op<2>;
+
+////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise(const Aidge::DimSize_t nbChannels,
+                                           const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                           const std::array<Aidge::DimSize_t, DIM> &dilationDims,
+                                           bool noBias) {
+    // FIXME: properly handle default w&b initialization in all cases
+    AIDGE_ASSERT(DIM<=MaxDim,"Too many kernel dimensions required by {}, not supported", ConvDepthWise_Op<DIM>::Type);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
+    if (!noBias) {
+        addProducer(convDW, 2, {nbChannels}, "b");
+    }
+    return convDW;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise<1>(Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise<2>(Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0c858548ec484c34a651efa4adec1cde7ccb9e54
--- /dev/null
+++ b/src/operator/DepthToSpace.cpp
@@ -0,0 +1,122 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/DepthToSpace.hpp"
+
+#include <array>
+#include <cstddef>  // std::size_t
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::DepthToSpace_OpImpl::forward() {
+    const DepthToSpace_Op& op = dynamic_cast<const DepthToSpace_Op&>(mOp);
+    // assumes an NCHW Tensor format
+
+    // Get input dimensions
+    const auto& dims = op.getInput(0)->dims<4>();
+    // get final output dimension
+    const std::array<DimSize_t, 4> final_dims = op.getOutput(0)->dims<4>();
+
+    std::size_t b = dims[0];
+    std::size_t c = dims[1] / (static_cast<DimSize_t>(op.blockSize()) * static_cast<DimSize_t>(op.blockSize()));
+    std::size_t h = dims[2];
+    std::size_t w = dims[3];
+
+    // Copy input tensor to output
+    op.setOutput(0, op.getInput(0));
+
+    // Step 1: Resize
+    const std::vector<DimSize_t> resize_dims =
+        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
+            std::vector<DimSize_t>({b, c, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), h, w}) :
+            std::vector<DimSize_t>({b, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), c, h, w});
+    op.getOutput(0)->resize(resize_dims);
+
+    // Step 2: Transpose
+    const std::vector<DimSize_t> transpose_order =
+        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
+            std::vector<DimSize_t>({0, 1, 4, 2, 5, 3}) :
+            std::vector<DimSize_t>({0, 3, 4, 1, 5, 2});
+    op.getOutput(0)->copyTranspose(*(op.getOutput(0)), transpose_order);
+
+    // Step 3: Final resize
+    op.getOutput(0)->resize(final_dims);
+}
+
+//////////////////////////////////////////////////////
+
+const std::string Aidge::DepthToSpace_Op::Type = "DepthToSpace";
+
+Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aidge::DepthToSpace_Op::Mode mode)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<DepthToSpaceAttr::BlockSize>(blockSize),
+        attr<DepthToSpaceAttr::Mode>(mode)))
+{
+    // ctor
+}
+
+Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(DepthToSpace_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::DepthToSpace_Op::clone() const {
+    return std::make_shared<DepthToSpace_Op>(*this);
+}
+
+bool Aidge::DepthToSpace_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->nbDims() == 4, "{} Operator only accepts 4-D input Tensors.", DepthToSpace_Op::Type);
+        AIDGE_ASSERT(getInput(0)->dims()[1] % (blockSize() * blockSize()) == 0, "Number of channels must be divisible by blocksize squared");
+
+        // Compute output dims
+        const std::array<DimSize_t, 4>& inDims = getInput(0)->dims<4>();
+        const std::vector<DimSize_t> outDims =
+                {inDims[0],
+                 inDims[1] / (static_cast<DimSize_t>(blockSize()) * static_cast<DimSize_t>(blockSize())),
+                 inDims[2] * static_cast<DimSize_t>(blockSize()),
+                 inDims[3] * static_cast<DimSize_t>(blockSize())};
+
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::DepthToSpace_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<DepthToSpace_Op>::exists({name})) {
+        SET_IMPL_MACRO(DepthToSpace_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<DepthToSpace_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+//////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::DepthToSpace(const std::uint32_t blockSize,
+                                    const Aidge::DepthToSpace_Op::Mode mode,
+                                    const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<DepthToSpace_Op>(blockSize, mode), name);
+}
\ No newline at end of file
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 387a9516077a937cca5c20ad091547b7f1c5be6f..2140b17a3abee329effaae63fada187fc522495f 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -56,3 +56,9 @@ void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Div_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+///////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Div(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index 81c87f10b10210c2af203a05df53e3330bb33b72..ed1f79f79a3011f72da1a1804d84960595f880c0 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -19,7 +19,27 @@
 
 const std::string Aidge::Erf_Op::Type = "Erf";
 
+Aidge::Erf_Op::Erf_Op(const Aidge::Erf_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Erf_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Erf_Op::clone() const {
+    return std::make_shared<Erf_Op>(*this);
+}
+
 void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Erf_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Erf(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 44d499bc7e125c757f802e086c22e1e6c72e9216..577a1842d76d3f58763ccd598205935e2c6d6eb4 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -23,6 +23,10 @@
 
 const std::string Aidge::FC_Op::Type = "FC";
 
+std::shared_ptr<Aidge::Operator> Aidge::FC_Op::clone() const {
+    return std::make_shared<FC_Op>(*this);
+}
+
 void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
     AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
@@ -86,3 +90,16 @@ void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device
         getInput(2)->setBackend(name, device);
     }
 }
+
+std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
+                                       const Aidge::DimSize_t outChannels,
+                                       bool noBias,
+                                       const std::string& name) {
+    // FIXME: properly handle default w&b initialization in all cases
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
+    addProducer(fc, 1, {outChannels, inChannels}, "w");
+    if (!noBias) {
+        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
+    }
+    return fc;
+}
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index abe73e54ede0611cb14e24332302c35afa91c2a9..1a2ec88bbfb2bfed134e779619a0a3f0604ce155 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -26,6 +26,24 @@
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Fold_Op<DIM>::Type = "Fold";
 
+template <Aidge::DimIdx_t DIM>
+Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Fold_Op<DIM>::clone() const {
+    return std::make_shared<Fold_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -64,4 +82,19 @@ void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
-template class Aidge::Fold_Op<2>;
\ No newline at end of file
+template class Aidge::Fold_Op<2>;
+
+///////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM> &outputDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims) {
+    // FIXME: properly handle default w&b initialization in all cases
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by Fold, not supported", Fold_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index cd3c4357434ec4b49b6ea05e0d2633adfee7bfd0..00d471f6dc3e1417e4b343002b12a26260030d30 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -20,6 +20,36 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+const std::string Aidge::Gather_Op::Type = "Gather";
+
+
+Aidge::Gather_Op::Gather_Op(std::int8_t axis,
+              const std::vector<int64_t>& indices,
+              const std::vector<Aidge::DimSize_t>& gatheredShape)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<GatherAttr::Axis>(axis),
+        attr<GatherAttr::Indices>(indices),
+        attr<GatherAttr::GatheredShape>(gatheredShape)))
+{
+    mImpl = std::make_shared<Gather_OpImpl>(*this);
+}
+
+Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op)
+    : OperatorTensor(op), mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Gather_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Gather_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const {
+    return std::make_shared<Gather_Op>(*this);
+}
+
 void Aidge::Gather_OpImpl::forward() {
     const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
 
@@ -48,8 +78,6 @@ void Aidge::Gather_OpImpl::forward() {
     }
 }
 
-const std::string Aidge::Gather_Op::Type = "Gather";
-
 bool Aidge::Gather_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
@@ -113,3 +141,12 @@ void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Gather(std::int8_t axis,
+                                        const std::vector<int64_t>& indices,
+                                        const std::vector<Aidge::DimSize_t>& gatheredShape,
+                                        const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
+}
\ No newline at end of file
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index d49e1f0838f623bca1546e54ea4f4e470d70e1c5..e8c66085de5bc7c808b7f2307a9a82b22a426bb2 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -18,6 +18,42 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
+                                            const std::vector<Aidge::InputCategory>& inputsCategory,
+                                            Aidge::IOIndex_t nbOut)
+    : OperatorTensor(type, inputsCategory, nbOut)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
+                                            Aidge::IOIndex_t nbData,
+                                            Aidge::IOIndex_t nbParam,
+                                            Aidge::IOIndex_t nbOut)
+    : OperatorTensor(type, [nbData, nbParam]() {
+                            std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
+                            inputsCategory.resize(nbData + nbParam, InputCategory::Param);
+                            return inputsCategory;
+                        }(), nbOut),
+        mAttributes(std::make_shared<DynamicAttributes>())
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
+{
+    mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
+}
+
+Aidge::GenericOperator_Op::~GenericOperator_Op() noexcept = default;
+
+std::shared_ptr<Aidge::Operator> Aidge::GenericOperator_Op::clone() const {
+    return std::make_shared<GenericOperator_Op>(*this);
+}
+
 const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity
     = [](const std::vector<std::vector<std::size_t>>& inputsDims) { return inputsDims; };
 
@@ -55,3 +91,20 @@ void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t
         mOutputs[i]->setBackend(name, device);
     }
 }
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                            const std::vector<Aidge::InputCategory>& inputCategory,
+                                            Aidge::IOIndex_t nbOut,
+                                            const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
+}
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                                Aidge::IOIndex_t nbData,
+                                                Aidge::IOIndex_t nbParam,
+                                                Aidge::IOIndex_t nbOut,
+                                                const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
+}
\ No newline at end of file
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index 1632c8a7677c884194494269e1a8cd93e7ef7822..e7b2bdffb979fe377de5c7bd1e86147874e7d043 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -21,6 +21,20 @@
 
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
+Aidge::GlobalAveragePooling_Op::GlobalAveragePooling_Op(const Aidge::GlobalAveragePooling_Op &op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::GlobalAveragePooling_Op::clone() const {
+    return std::make_shared<GlobalAveragePooling_Op>(*this);
+}
+
 bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
@@ -41,4 +55,10 @@ bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::GlobalAveragePooling(const std::string &name) {
+  return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(), name);
+}
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fa1efc75a4c0a85717343ce4fcdea1a8adcfb4e7
--- /dev/null
+++ b/src/operator/GridSample.cpp
@@ -0,0 +1,114 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/GridSample.hpp"
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+
+const std::string Aidge::GridSample_Op::Type = "GridSample";
+
+
+Aidge::GridSample_Op::GridSample_Op(
+    typename Aidge::GridSample_Op::Mode mode,
+    typename Aidge::GridSample_Op::PaddingMode paddingMode,
+    bool alignCorners)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<GridSampleAttr::Mode>(mode),
+        attr<GridSampleAttr::PaddingMode>(paddingMode),
+        attr<GridSampleAttr::AlignCorners>(alignCorners)))
+{
+    // ctor
+}
+
+
+Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other)
+    : OperatorTensor(other),
+      mAttributes(other.mAttributes)
+{
+    if (other.mImpl) {
+        SET_IMPL_MACRO(GridSample_Op, *this, other.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+
+Aidge::GridSample_Op::~GridSample_Op() noexcept = default;
+
+
+std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op::clone() const {
+    return std::make_shared<GridSample_Op>(*this);
+}
+
+
+bool Aidge::GridSample_Op::forwardDims(bool /*allowDataDependency*/) {
+    // TODO: adapt for other formats than NCHW
+    if (inputsAssociated()) {
+        // check data has batch and channel dimensions: (N, C, D0, D1, ..., DN)
+        AIDGE_ASSERT(getInput(0)->nbDims() > 2, "Input should have at least one spatial dimension.");
+        const std::size_t nbSpatialFeat = getInput(0)->nbDims() -2; // all except channels and batchs
+        // check grid field
+        // should be (N, D0_out, D1_out, ..., DN_out, N+1)
+        AIDGE_ASSERT(((getInput(1)->nbDims() == nbSpatialFeat + 2) &&
+            (getInput(1)->dims()[nbSpatialFeat+1] == nbSpatialFeat) &&
+            (getInput(1)->dims()[0] == getInput(0)->dims()[0])),
+            "Wrong grid size {} for {} operator.", getInput(1)->dims(), type());
+
+        std::vector<DimSize_t> outputDims{};
+        outputDims.reserve(nbSpatialFeat+2);
+        const std::vector<DimSize_t>& inputDims(getInput(1)->dims());
+        outputDims.push_back(inputDims[0]);
+        outputDims.push_back(getInput(0)->dims()[1]);
+        for (std::size_t i = 2; i < nbSpatialFeat+2; ++i) {
+            outputDims.push_back(inputDims[i-1]);
+        }
+
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+
+    return false;
+}
+
+
+
+void Aidge::GridSample_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(GridSample_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+
+////////////////////////////////////////////////
+
+
+std::shared_ptr<Aidge::Node> Aidge::GridSample(
+                        typename Aidge::GridSample_Op::Mode mode,
+                        typename Aidge::GridSample_Op::PaddingMode paddingMode,
+                        bool alignCorners,
+                        const std::string& name)
+{
+    return std::make_shared<Node>(
+        std::make_shared<GridSample_Op>(
+                mode,
+                paddingMode,
+                alignCorners),
+            name);
+}
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index 2b8107bfc77ef70b33a97032d350a42ec5f3f466..2f60eb2fd9c5d43c60ae7ee3af49c3b2e407a1fe 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -15,8 +15,35 @@
 
 const std::string Aidge::Identity_Op::Type = "Identity";
 
+Aidge::Identity_Op::Identity_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op)
+    : OperatorTensor(op)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const {
+    return std::make_shared<Identity_Op>(*this);
+}
+
+bool Aidge::Identity_Op::dimsForwarded() const {
+    const auto& input0 = getInput(0);
+    return input0 ? (input0->undefined() ? false :
+                            input0->dims() == getOutput(0)->dims()) :
+                                false;
+}
+
 void Aidge::Identity_Op::forward() {
     // Perform a shallow copy
     *(mOutputs[0]) = *(mInputs[0]);
     runHooks();
 }
+
+std::shared_ptr<Aidge::Node> Aidge::Identity(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
+}
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index 32e050ee1595cf83b5cd0ffbfeba6153dc2243af..9def23758d5f779f14dec2ee19199fe0f48c4980 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -9,8 +9,37 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/LeakyReLU.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/LeakyReLU.hpp"
+#include "aidge/data/Tensor.hpp"
+
+const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
+
+Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::LeakyReLU_Op::clone() const {
+    return std::make_shared<LeakyReLU_Op>(*this);
+}
+
+void Aidge::LeakyReLU_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+/////////////////////////////////////
 
-const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
\ No newline at end of file
+std::shared_ptr<Aidge::Node> Aidge::LeakyReLU(float negativeSlope, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
+}
\ No newline at end of file
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
index 7e9f9ad01186f53a0f89657acb72f6a544223068..31012cbb1eec22f8dc02497f9e46b88ec713eabe 100755
--- a/src/operator/Ln.cpp
+++ b/src/operator/Ln.cpp
@@ -19,7 +19,27 @@
 
 const std::string Aidge::Ln_Op::Type = "Ln";
 
+Aidge::Ln_Op::Ln_Op(const Aidge::Ln_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Ln_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Ln_Op::clone() const {
+    return std::make_shared<Ln_Op>(*this);
+}
+
 void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     mImpl = Registrar<Ln_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
 }
+
+/////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Ln(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 207229b93b0ae362f42c1bae6fb1455b5a2b9d3d..c95fe544cbd29f715e8bd7caae58193deaac6331 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -20,6 +20,20 @@
 
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
+Aidge::MatMul_Op::MatMul_Op(const Aidge::MatMul_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::MatMul_Op::clone() const {
+    return std::make_shared<MatMul_Op>(*this);
+}
+
 bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated(false)) {
         if (getInput(0)->empty() && getInput(1)->empty()) {
@@ -82,3 +96,9 @@ void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     SET_IMPL_MACRO(MatMul_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::MatMul(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..85f2dd930f2d35b9d9e9ea597b588637a56cb952
--- /dev/null
+++ b/src/operator/MaxPooling.cpp
@@ -0,0 +1,104 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MaxPooling.hpp"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                            const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                            bool ceil_mode)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+    attr<MaxPoolingAttr::StrideDims>(stride_dims),
+    attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+    attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
+{}
+
+template <Aidge::DimIdx_t DIM>
+Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::MaxPooling_Op<DIM>::clone() const {
+    return std::make_shared<MaxPooling_Op<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
+        std::function<float(float)> roundingFunction;
+        if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
+            roundingFunction = [](float x) { return std::ceil(x); };
+        } else {
+            roundingFunction = [](float x) { return std::floor(x); };
+        }
+
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                        roundingFunction(static_cast<float>(inputDims[dim+2] -
+                                                                mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
+                                        static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
+        }
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::MaxPooling_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+template class Aidge::MaxPooling_Op<1>;
+template class Aidge::MaxPooling_Op<2>;
+template class Aidge::MaxPooling_Op<3>;
+
+///////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::MaxPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                           bool ceil_mode)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, bool);
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 88a182f2ae7d51abb059faa64058fb701a033b56..f713fdaad793aebebf5047d4ebf1dfd5aca10cd1 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -75,6 +75,33 @@ void Aidge::Memorize_OpImpl::forward() {
 
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
+Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
+        mAttributes(std::make_shared<Attributes_>(
+                    attr<MemorizeAttr::ScheduleStep>(0),
+                    attr<MemorizeAttr::ForwardStep>(0),
+                    attr<MemorizeAttr::EndStep>(endStep)))
+{
+    mOutputs[1] = mOutputs[0];
+}
+
+Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+    mOutputs[1] = mOutputs[0];
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Memorize_Op::clone() const {
+    return std::make_shared<Memorize_Op>(*this);
+}
+
+
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
     ++mAttributes->template getAttr<MemorizeAttr::ScheduleStep>();
@@ -125,3 +152,9 @@ void Aidge::Memorize_Op::forward() {
     ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
     mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Memorize(const std::uint32_t endStep, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
+}
\ No newline at end of file
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index e7c50033797c7c984b6b8da69d30f005bc69e70c..a7997bc1a07e633feaf0873078ddb1ebb9bc71d4 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -23,7 +23,13 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
     : OperatorTensor(type, [graph]() {
         std::vector<InputCategory> inputsCategory;
         for (const auto& in : graph->getOrderedInputs()) {
-            inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
+            if (in.first) {
+                inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
+            }
+            else {
+                // Unconnected (dummy) graph input: default its category to OptionalData.
+                inputsCategory.push_back(InputCategory::OptionalData);
+            }
         }
         return inputsCategory;
     }(), graph->getOrderedOutputs().size()),
@@ -38,6 +44,10 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const {
+    return std::make_shared<MetaOperator_Op>(*this);
+}
+
 void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
     AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
@@ -59,6 +69,18 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
 }
 
+void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    if (Registrar<MetaOperator_Op>::exists({name, type()})) {
+        // A custom implementation exists for this meta operator
+        mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
+    }
+
+    // The micro-graph must always be switched to the requested backend,
+    // since it shares its input/output tensors with this meta-operator.
+    // The input/output tensors' backends are updated by this call.
+    mGraph->setBackend(name, device);
+}
+
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
         return mImpl->getNbRequiredData(inputIdx);
@@ -182,3 +204,15 @@ void Aidge::MetaOperator_Op::forward() {
         mScheduler->forward(false);
     }
 }
+
+/////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
+                                  const std::shared_ptr<Aidge::GraphView>& graph,
+                                  const std::string& name)
+{
+    auto op = std::make_shared<MetaOperator_Op>(type, graph);
+    auto node = std::make_shared<Node>(op, name);
+    op->setUpperNode(node);
+    return node;
+}
\ No newline at end of file
diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..31b1c675e9d577002350ea11dd0b42601a91ef76
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp
@@ -0,0 +1,86 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
+                                  Aidge::DimSize_t out_channels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+                                  bool no_bias)
+{
+    // auto metaOp = PaddedConv_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims);
+    // if (!name.empty()) {
+    //     metaOp->getMicroGraph()->setName(name);
+    //     metaOp->getMicroGraph()->setNodesName();
+    // }
+    // auto metaOpNode = std::make_shared<Node>(metaOp, name);
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "")
+    });
+    auto metaOpNode = MetaOperator("PaddedConv", graph, name);
+    addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    if (!no_bias) {
+        addProducer(metaOpNode, 2, {out_channels}, "b");
+    }
+    return metaOpNode;
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<1>(const Aidge::DimSize_t, const Aidge::DimSize_t, const std::array<Aidge::DimSize_t,1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<2>(const Aidge::DimSize_t, const Aidge::DimSize_t, const std::array<Aidge::DimSize_t,2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op(
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
+}
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
+
+// Helper overload taking a C-style array for kernel_dims, so the template DIM is deduced automatically.
+template <Aidge::DimSize_t DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConv(
+    Aidge::DimSize_t in_channels,
+    Aidge::DimSize_t out_channels,
+    Aidge::DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name,
+    const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+    const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+    const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+    bool no_bias)
+{
+    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<1>(const Aidge::DimSize_t, const Aidge::DimSize_t, const Aidge::DimSize_t (&)[1], const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<2>(const Aidge::DimSize_t, const Aidge::DimSize_t, const Aidge::DimSize_t (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1c073b78a61763b46e330089cccfcc4bced352a4
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t nb_channels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+                                  bool no_bias)
+{
+    // auto metaOp = std::make_shared<Node>(PaddedConvDepthWise_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    // if (!name.empty()) {
+    //     std::static_pointer_cast<MetaOperator_Op>(metaOp->getOperator())->getMicroGraph()->setNodesName();
+    // }
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv_depth_wise" : "")
+    });
+    auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, name);
+    addProducer(metaOpNode, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
+    if (!no_bias) {
+        addProducer(metaOpNode, 2, {nb_channels}, "b");
+    }
+    return metaOpNode;
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<1>(const Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<2>(const Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op(
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
+}
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
+
+
+// Helper overload taking a C-style array for kernel_dims, so the template DIM is deduced automatically.
+template <Aidge::DimSize_t DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(
+    const Aidge::DimSize_t nb_channels,
+    Aidge::DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name,
+    const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+    const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+    const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+    bool no_bias)
+{
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<1>(const Aidge::DimSize_t, const Aidge::DimSize_t (&)[1], const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<2>(const Aidge::DimSize_t, const Aidge::DimSize_t (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index 0f635ea655676e488343bb55d9de6423a997af7d..4190c10a06458036f2cd8953156b969afa51bebf 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -19,6 +19,27 @@ void Aidge::Move_OpImpl::forward() {
 
 const std::string Aidge::Move_Op::Type = "Move";
 
+Aidge::Move_Op::Move_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1)
+{
+    mImpl = std::make_shared<Move_OpImpl>(*this);
+}
+
+Aidge::Move_Op::Move_Op(const Aidge::Move_Op& op)
+    : OperatorTensor(op)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
+    }
+    else {
+        mImpl = std::make_shared<Move_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Move_Op::clone() const {
+    return std::make_shared<Move_Op>(*this);
+}
+
 void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Move_Op>::exists({mInputs[0]->getImpl()->backend(), name})) {
         SET_IMPL_MACRO(Move_Op, *this, {mInputs[0]->getImpl()->backend(), name});
@@ -28,3 +49,9 @@ void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Move(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
+}
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index ded67a11acd299e5407f0d7e74146f5bcd1bf86a..e2e32805f6fde7ab6831fe4756ca60ad42c3925a 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -23,6 +23,20 @@
 
 const std::string Aidge::Mul_Op::Type = "Mul";
 
+Aidge::Mul_Op::Mul_Op(const Aidge::Mul_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Mul_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Mul_Op::clone() const {
+    return std::make_shared<Mul_Op>(*this);
+}
+
 bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
@@ -56,3 +70,9 @@ void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Mul_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Mul(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index c66e6c84af6df299e4786bbbb73767d6ee6374f5..5b1428c160f976a043bb5cbe6fc6cb3351bab336 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -10,10 +10,62 @@
  ********************************************************************************/
 
 #include "aidge/operator/Pad.hpp"
+
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Pad_Op<DIM>::Type = "Pad";
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const {
+    return std::make_shared<Pad_Op<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
+
+        for (std::size_t dim = 0; dim < DIM; ++dim) {
+            outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+                                + inputDims[dim+2]
+                                + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+        }
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Pad_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
 template class Aidge::Pad_Op<1>;
 template class Aidge::Pad_Op<2>;
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Pad(const std::array<Aidge::DimSize_t, 2*DIM> &beginEndTuples,
+                                           const std::string& name,
+                                           const PadBorderType &borderType,
+                                           double borderValue)
+{
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", Pad_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
\ No newline at end of file
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 2fcc46a460ffd7c7f6746dfcd108acbaafe912de..5d32a06fd01d8674d8e072f14838f3fd80d1f30a 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -35,8 +35,33 @@ void Aidge::Pop_OpImpl::forward() {
     *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
 }
 
+//////////////////////////////////////////////////////////
+
 const std::string Aidge::Pop_Op::Type = "Pop";
 
+Aidge::Pop_Op::Pop_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
+{
+    mImpl = std::make_shared<Pop_OpImpl>(*this);
+}
+
+Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Pop_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Pop_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Pop_Op::clone() const {
+    return std::make_shared<Pop_Op>(*this);
+}
+
 bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         auto inputDims = getInput(0)->dims();
@@ -67,3 +92,9 @@ void Aidge::Pop_Op::forward() {
     Operator::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
+
+///////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Pop(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 2a50f9c7bad1e40cd6e69cfc0a22632439cfe000..1602c8c2aa28e305b340888cb3a77cb4d2fc4293 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -54,4 +54,10 @@ bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Pow_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Pow(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index bdb69452ec54fb635d0cbc299336071295f37ae1..e5c4a3e9e18af8b3236b612db2b959f5ce4ec30a 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -26,6 +26,17 @@
 
 const std::string Aidge::Producer_Op::Type = "Producer";
 
+template <std::size_t DIM>
+Aidge::Producer_Op::Producer_Op(
+            const std::array<Aidge::DimSize_t, DIM>& dims,
+            bool constant)
+    : OperatorTensor(Type, {}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ProdAttr::Constant>(constant)))
+{
+    mOutputs[0]->resize(dims);
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
 
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
     : OperatorTensor(Type, {}, 1),
@@ -59,6 +70,10 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
+    return std::make_shared<Producer_Op>(*this);
+}
+
 void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Producer_Op>::exists({name})){
         SET_IMPL_MACRO(Producer_Op, *this, name);
@@ -76,3 +91,75 @@ void Aidge::Producer_Op::forward() {
 
     runHooks();
 }
+
+void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const {
+    if (mAttributes->template getAttr<ProdAttr::Constant>()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
+    }
+    OperatorTensor::setOutput(outputIdx, data);
+}
+
+/////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Producer(const std::array<Aidge::DimSize_t, DIM> &dims,
+        const std::string& name,
+        bool constant)
+{
+  static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
+  return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Producer<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<4>(const std::array<Aidge::DimSize_t, 4>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<5>(const std::array<Aidge::DimSize_t, 5>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<6>(const std::array<Aidge::DimSize_t, 6>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<7>(const std::array<Aidge::DimSize_t, 7>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<8>(const std::array<Aidge::DimSize_t, 8>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<9>(const std::array<Aidge::DimSize_t, 9>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<10>(const std::array<Aidge::DimSize_t, 10>&, const std::string&, bool);
+
+std::shared_ptr<Aidge::Node> Aidge::Producer(const std::shared_ptr<Aidge::Tensor> tensor,
+            const std::string& name,
+            bool constant)
+{
+    return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
+}
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::addProducer(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, DIM>& dims,
+        const std::string& extension)
+{
+    AIDGE_ASSERT(inputIdx < gk_IODefaultIndex, "Input index too high. Cannot create Producer");
+    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
+    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
+    auto prod = Producer(dims, prodName);
+    prod->addChild(otherNode, 0, inputIdx);
+    otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
+    return prod;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<1>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 1>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<2>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 2>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<3>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 3>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<4>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 4>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<5>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 5>& dims,
+        const std::string& extension);
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index 7b945a7d62ab0ef7f73a25f6f74430e725d17b48..03f9e0679facc452d5a8bdc71707a824240f15ac 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -19,7 +19,27 @@
 
 const std::string Aidge::ReLU_Op::Type = "ReLU";
 
+Aidge::ReLU_Op::ReLU_Op(const Aidge::ReLU_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ReLU_Op::clone() const {
+    return std::make_shared<ReLU_Op>(*this);
+}
+
 void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ReLU_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+/////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ReLU(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 96f2f855f46275e167acb1300434f8bcdbdd7d3e..2a215d897884e936aa9265e5ae16b1774d94bae6 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -15,6 +15,7 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int32_t
 #include <memory>
+#include <numeric>    // std::iota
 #include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
@@ -26,6 +27,29 @@
 
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
+Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ReduceMeanAttr::Axes>(axes),
+        attr<ReduceMeanAttr::KeepDims>(keep_dims),
+        attr<ReduceMeanAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
+{}
+
+Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ReduceMean_Op::clone() const {
+    return std::make_shared<ReduceMean_Op>(*this);
+}
+
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // make Axes attribute positive
@@ -38,6 +62,18 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+        if (axes.empty())
+        {
+            if(mAttributes->template getAttr<ReduceMeanAttr::NoopWithEmptyAxes>()) {
+                mOutputs[0]->resize(outDims);
+                return true;
+            }
+            // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
+            axes.resize(getInput(0)->nbDims());
+            std::iota(axes.begin(), axes.end(), 0);
+        }
+
         if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
             std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
@@ -56,4 +92,14 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(ReduceMean_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes,
+                                        bool keep_dims,
+                                        bool noop_with_empty_axes,
+                                        const std::string& name) {
+    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims, noop_with_empty_axes), name);
 }
\ No newline at end of file
diff --git a/src/operator/ReduceSum.cpp b/src/operator/ReduceSum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..aa8271f4c1696d46274e536e14d255525d848f80
--- /dev/null
+++ b/src/operator/ReduceSum.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ReduceSum.hpp"
+
+#include <algorithm>  // std::for_each, std::sort
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <numeric>    // std::iota
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ReduceSum_Op::Type = "ReduceSum";
+
+bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // make Axes attribute positive
+        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceSumAttr::Axes>();
+        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+            if (val < 0)
+                val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+        });
+        std::sort(axes.begin(), axes.end());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+        if (axes.empty())
+        {
+            if(mAttributes->template getAttr<ReduceSumAttr::NoopWithEmptyAxes>()) {
+                mOutputs[0]->resize(outDims);
+                return true;
+            }
+            // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
+            axes.resize(getInput(0)->nbDims());
+            std::iota(axes.begin(), axes.end(), 0);
+        }
+
+        if (mAttributes->template getAttr<ReduceSumAttr::KeepDims>()) {
+            std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
+        }
+        else {
+            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
+        }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
+    }
+    return false;
+}
+
+void Aidge::ReduceSum_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ReduceSum_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
\ No newline at end of file
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index cc31eeea758853a4183569d58412c427bd32006c..5139a0b0c98b11a0cbf6770397be56c830d0aa49 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -28,8 +28,35 @@ void Aidge::Reshape_OpImpl::forward() {
     op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
 }
 
+//////////////////////////////////////////////////
+
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
+Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ReshapeAttr::Shape>(shape),
+        attr<ReshapeAttr::AllowZero>(allowzero)))
+{
+    mImpl = std::make_shared<Reshape_OpImpl>(*this);
+}
+
+Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Reshape_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Reshape_Op::clone() const {
+    return std::make_shared<Reshape_Op>(*this);
+}
+
 bool Aidge::Reshape_Op::dimsForwarded() const {
     if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
@@ -108,3 +135,12 @@ void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Reshape(const std::vector<std::int64_t>& shape,
+                            bool allowzero,
+                            const std::string &name)
+{
+    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
+}
\ No newline at end of file
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 0d407d4f97a17b8a89378bc83c1039423d9b2949..f3a69848ebd3cb7dbfb43788d16030e21e071b9c 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -25,6 +25,35 @@
 
 const std::string Aidge::Resize_Op::Type = "Resize";
 
+Aidge::Resize_Op::Resize_Op()
+    : OperatorTensor(Type,
+        {InputCategory::Data,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData},
+        1) {}
+
+/**
+ * @brief Copy-constructor. Copies the operator attributes and its output tensor(s),
+ * but not its input tensors: the new operator has no inputs associated.
+ * @param op Operator to copy.
+ */
+
+Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
+    : OperatorTensor(op)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const {
+    return std::make_shared<Resize_Op>(*this);
+}
+
 bool Aidge::Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
     if ((getInput(1) && !getInput(1)->undefined())
@@ -89,10 +118,10 @@ bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
             std::shared_ptr<Tensor> fallback;
             const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
 
-            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {            
+            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
                 outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
             }
-            
+
             mOutputs[0]->resize(outDims);
             return true;
         }
@@ -101,14 +130,14 @@ bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
         }
     }
 
-    return false; 
+    return false;
 }
 
 void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Resize_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 
-    // By default, automatically set backend for all inputs: roi, scales and sizes 
+    // By default, automatically set backend for all inputs: roi, scales and sizes
     if(getInput(1)) {
         getInput(1)->setBackend(name, device);
     }
@@ -119,3 +148,9 @@ void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
         getInput(3)->setBackend(name, device);
     }
 }
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index dc5e272210feb09fd5dac6ba4b16f9ba8dc93bf0..a53695b58aab9ea8a50e15638b4c50d42cf444dd 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -20,7 +20,40 @@
 
 const std::string Aidge::Scaling_Op::Type = "Scaling";
 
+Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ScalingAttr::ScalingFactor>(scalingFactor),
+        attr<ScalingAttr::QuantizedNbBits>(nbBits),
+        attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
+{}
+
+Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Scaling_Op::clone() const {
+    return std::make_shared<Scaling_Op>(*this);
+}
+
 void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Scaling_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Scaling(float scalingFactor,
+                                     std::size_t quantizedNbBits,
+                                     bool isOutputUnsigned,
+                                     const std::string& name)
+{
+    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
 }
\ No newline at end of file
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 39f5e2fe09b7ac750b8ea9d48d17fc2e97013c1a..f2ad1005907b71ee279b9d9bc9853b667108855c 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -30,8 +30,35 @@ void Aidge::Shape_OpImpl::forward() {
                                          end - start + 1);
 }
 
+///////////////////////////////////////////////
+
 const std::string Aidge::Shape_Op::Type = "Shape";
 
+Aidge::Shape_Op::Shape_Op(const std::int64_t start, const std::int64_t end)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ShapeAttr::Start>(start),
+        attr<ShapeAttr::End>(end)))
+{
+    mImpl = std::make_shared<Shape_OpImpl>(*this);
+}
+
+Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Shape_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Shape_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const {
+    return std::make_shared<Shape_Op>(*this);
+}
+
 bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         if (this->start() < 0)
@@ -63,3 +90,9 @@ void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Shape(const std::int64_t start, const std::int64_t end, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
+}
\ No newline at end of file
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
index ede83e291bd1670885192e3ac8f4958e185c28e2..63480ffccaaf78b2dd951c75b3830a8dfede7d99 100644
--- a/src/operator/ShiftGELU.cpp
+++ b/src/operator/ShiftGELU.cpp
@@ -21,7 +21,29 @@
 
 const std::string Aidge::ShiftGELU_Op::Type = "ShiftGELU";
 
+Aidge::ShiftGELU_Op::ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::ShiftGELU_Op::ShiftGELU_Op(const Aidge::ShiftGELU_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ShiftGELU_Op::clone() const {
+    return std::make_shared<ShiftGELU_Op>(*this);
+}
+
 void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ShiftGELU_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ShiftGELU(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
index eb77ae655354eac03fbdc0f1a84a44391795ee8c..5b0dd7ace0984c2397ef3a7bb4ef7a5526f4f288 100644
--- a/src/operator/ShiftMax.cpp
+++ b/src/operator/ShiftMax.cpp
@@ -21,7 +21,33 @@
 
 const std::string Aidge::ShiftMax_Op::Type = "ShiftMax";
 
+Aidge::ShiftMax_Op::ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::ShiftMax_Op::ShiftMax_Op(const Aidge::ShiftMax_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+/**
+ * @brief Clone the operator using its copy-constructor.
+ * @see ShiftMax_Op::ShiftMax_Op(const ShiftMax_Op&)
+ */
+std::shared_ptr<Aidge::Operator> Aidge::ShiftMax_Op::clone() const {
+    return std::make_shared<ShiftMax_Op>(*this);
+}
+
 void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ShiftMax_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+/////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ShiftMax(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index a6edcf823695f95253d6c56e45975480909679d3..aa112378fde50c7f36c63b8c0a8d00ed0baab12b 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -20,7 +20,30 @@
 
 const std::string Aidge::Sigmoid_Op::Type = "Sigmoid";
 
+Aidge::Sigmoid_Op::Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Sigmoid_Op::Sigmoid_Op(const Aidge::Sigmoid_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Sigmoid_Op::clone() const {
+    return std::make_shared<Sigmoid_Op>(*this);
+}
+
+
 void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Sigmoid_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+///////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sigmoid(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 4fcfd587a9b3d8858b2e8a71605743c6702cb310..bd7a4750dcbb129b56c541b3e75c2ec6faa7d55a 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -11,7 +11,6 @@
 
 #include "aidge/operator/Slice.hpp"
 
-#include <cassert>
 #include <cstddef>
 #include <cstdint>
 #include <string>
@@ -28,6 +27,41 @@
 
 const std::string Aidge::Slice_Op::Type = "Slice";
 
+Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
+                        const std::vector<std::int64_t>& ends,
+                        const std::vector<std::int8_t>& axes,
+                        const std::vector<std::int64_t>& steps)
+    : OperatorTensor(Type,
+        {InputCategory::Data,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData},
+        1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SliceAttr::Starts>(starts),
+        attr<SliceAttr::Ends>(ends),
+        attr<SliceAttr::Axes>(axes),
+        attr<SliceAttr::Steps>(steps)))
+{}
+
+Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Slice_Op, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
+    return std::make_shared<Slice_Op>(*this);
+}
+
+
 bool Aidge::Slice_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined())
         || (getInput(2) && !getInput(2)->undefined())
@@ -177,3 +211,13 @@ void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     SET_IMPL_MACRO(Slice_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& starts,
+                                   const std::vector<std::int64_t>& ends,
+                                   const std::vector<std::int8_t>& axes,
+                                   const std::vector<std::int64_t>& steps,
+                                   const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
+}
\ No newline at end of file
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index 612c61b0f66b97eb4630214538a22154a67b80d8..f425d6fffb8934f00b1c503c1d296b8318377cb0 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -20,7 +20,34 @@
 
 const std::string Aidge::Softmax_Op::Type = "Softmax";
 
+Aidge::Softmax_Op::Softmax_Op(std::int32_t axis)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SoftmaxAttr::Axis>(axis)))
+{}
+
+Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Softmax_Op::clone() const {
+    return std::make_shared<Softmax_Op>(*this);
+}
+
 void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Softmax_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Softmax(std::int32_t axis, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
 }
\ No newline at end of file
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index af7474d8a21db9ece237440b46ecf57db9b270b4..9c56c6a2a28c6acb8c3943cd859fdbe78fd2cd1b 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -52,8 +52,37 @@ void Aidge::Split_OpImpl::forward() {
     }
 }
 
+/////////////////////////////////////////////////////
+
 const std::string Aidge::Split_Op::Type = "Split";
 
+Aidge::Split_Op::Split_Op(std::int8_t axis,
+                        Aidge::DimSize_t nbOutputs,
+                        const std::vector<Aidge::DimSize_t>& split)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SplitAttr::Axis>(axis),
+        attr<SplitAttr::Split>(split)))
+{
+    mImpl = std::make_shared<Split_OpImpl>(*this);
+}
+
+Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Split_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Split_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Split_Op::clone() const {
+    return std::make_shared<Split_Op>(*this);
+}
+
 bool Aidge::Split_Op::dimsForwarded() const {
     if ((getInput(1) && !getInput(1)->undefined()))
     {
@@ -120,7 +149,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
 
         return true;
     }
-    
+
     return false;
 }
 
@@ -135,5 +164,14 @@ void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     {
         mOutputs[i]->setBackend(name, device);
     }
-    
+
 }
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Split(Aidge::DimSize_t nbOutput,
+                                   std::int8_t axis,
+                                   const std::vector<Aidge::DimSize_t>& split,
+                                   const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
+}
\ No newline at end of file
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index d8ac8b8b0bf28110bd52493d7833f64e9e80fc6a..3af75a6ca19e301f6c14e1b5fd03d693c161dcc5 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -14,13 +14,35 @@
 #include <memory>
 #include <string>
 
+#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 const std::string Aidge::Sqrt_Op::Type = "Sqrt";
 
+Aidge::Sqrt_Op::Sqrt_Op(const Aidge::Sqrt_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+
+std::shared_ptr<Aidge::Operator> Aidge::Sqrt_Op::clone() const {
+    return std::make_shared<Sqrt_Op>(*this);
+}
+
 void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Sqrt_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sqrt(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 858b32beaf9e23e8e9e7f52cfe7176afe399843c..ee4fd5b0887c5d9fafa3acd5822334dba4070aa8 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -24,6 +24,20 @@
 
 const std::string Aidge::Sub_Op::Type = "Sub";
 
+Aidge::Sub_Op::Sub_Op(const Aidge::Sub_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sub_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Sub_Op::clone() const {
+    return std::make_shared<Sub_Op>(*this);
+}
+
 bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
@@ -57,3 +71,9 @@ void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Sub_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sub(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
+}
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index c113ee6f2da52f40a66a8df04ca33ec4b85f3387..1f936b6c8c5f61d86e2832c4bee7b943fa8268a1 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -20,7 +20,29 @@
 
 const std::string Aidge::Tanh_Op::Type = "Tanh";
 
+Aidge::Tanh_Op::Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Tanh_Op::Tanh_Op(const Aidge::Tanh_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Tanh_Op::clone() const {
+    return std::make_shared<Tanh_Op>(*this);
+}
+
 void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Tanh_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Tanh(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 30372e44f8f9641734fc1109bf03a64794383a3e..bd1acee8a820ad2e3e54b7b0b21f979fc9ce1feb 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -28,8 +28,34 @@ void Aidge::TransposeImpl::forward() {
     op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
 }
 
+///////////////////////////////////////////////////
+
 const std::string Aidge::Transpose_Op::Type = "Transpose";
 
+Aidge::Transpose_Op::Transpose_Op(const std::vector<Aidge::DimSize_t> &outputDimsOrder)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
+{
+    mImpl = std::make_shared<TransposeImpl>(*this);
+}
+
+Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<TransposeImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const {
+    return std::make_shared<Transpose_Op>(*this);
+}
+
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars.");
@@ -52,3 +78,10 @@ void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+//////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
+                                           const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
+}
\ No newline at end of file
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 94c970fd3a246f0d9e1237e7cce0c15dd8e24526..2b12f33585a7388bd2411a8ae84ef43915516024 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -65,9 +65,44 @@ void Aidge::Unfold_OpImpl<DIM>::forward() {
     }
 }
 
+template class Aidge::Unfold_OpImpl<2>;
+
+/////////////////////////////////////////////////////////////
+
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
 
+template <Aidge::DimIdx_t DIM>
+Aidge::Unfold_Op<DIM>::Unfold_Op(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                    const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                    const std::array<Aidge::DimSize_t, DIM> &dilationDims)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<UnfoldAttr::StrideDims>(strideDims),
+        attr<UnfoldAttr::DilationDims>(dilationDims),
+        attr<UnfoldAttr::KernelDims>(kernelDims)))
+{
+    mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Unfold_Op<DIM>::clone() const {
+    return std::make_shared<Unfold_Op>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
@@ -103,5 +138,20 @@ void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx
     mOutputs[0]->setBackend(name, device);
 }
 
-template class Aidge::Unfold_OpImpl<2>;
-template class Aidge::Unfold_Op<2>;
\ No newline at end of file
+template class Aidge::Unfold_Op<2>;
+
+///////////////////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Unfold(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
+    return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Unfold<2>(const std::array<Aidge::DimSize_t, 2>&,
+                                  const std::string&,
+                                  const std::array<Aidge::DimSize_t, 2>&,
+                                  const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index aa20a056ad789975c5b4d493a1ce48dcd7592946..34722c19f8c0fddaffa7357136f1512a027e1617 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -16,6 +16,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/graph/Matching.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
@@ -25,9 +26,6 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-// Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
-
 void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
                           std::shared_ptr<Aidge::Node> batchnormNode) {
     // Case: convNode is a MetaOperator ending with a Convolution
@@ -92,13 +90,13 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             meanVariance += b_var.get<float>(outChId);
             ++count;
         } else {
-            fmt::print("Zero-variance: {} [{}]\n", convNode->name(), outChId);
+            Log::notice("Zero-variance: {} [{}]\n", convNode->name(), outChId);
         }
     }
     if (count > 0)
         meanVariance /= count;
     else {
-        fmt::print("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+        Log::notice("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
     // Add bias if it is non existant, as there will be a bias after the fuse
@@ -191,44 +189,11 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
 
 }
 
-void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::MatchSolution> solution) {
-    assert(solution->at("BatchNorm").size() == 1 && "Wrong number of nodes BatchNorm to replace\n");
-    assert(solution->at("OP").size() == 1 && "Wrong number of nodes OP to replace\n");
-
-    for (const auto& op : solution->at("OP")) {
-        if (op->getOperator()->isAtomic()) {
-            for (const auto& batchNorm : solution->at("BatchNorm")) {
-                fuseBatchNorm(op, batchNorm);
-            }
-        } else {  // op is a MetaOperator
-            auto metaOp = std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator());
-            if ((metaOp->getMicroGraph()->getOrderedOutputs().size() == 1) &&
-                ((metaOp->getMicroGraph()->getOrderedOutputs()[0].first->type() ==
-                  Conv_Op<2>::Type) ||
-                 (metaOp->getMicroGraph()->getOrderedOutputs()[0].first->type() ==
-                  ConvDepthWise_Op<2>::Type))) {
-                for (const auto& batchNorm : solution->at("BatchNorm")) {
-                    fuseBatchNorm(op, batchNorm);
-                }
-            }
-        }
-    }
-}
-
 void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
-    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-    regex->setNodeKey("BatchNorm", "getType($) =='BatchNorm'");
-    fmt::print("\n============================\nSearching for solutions\n==============================\n");
-    regex->setNodeKey(
-            "OP",
-            "getType($) =='Conv' || getType($) =='ConvDepthWise' || getType($) =='PaddedConv' || getType($) =='PaddedConvDepthWise'");
-            //  || getType($) =='FC' ");
-
-    regex->addQuery("OP -> BatchNorm");
-
-    for (const auto& solution : regex->match(graphView)) {
-
-        fuseBatchNorm(solution);
+    auto matches = SinglePassGraphMatching(graphView).match("(Conv|ConvDepthWise|PaddedConv|PaddedConvDepthWise)->BatchNorm");
 
+    for (const auto& match : matches) {
+        auto rootNode = match.graph->rootNode();
+        fuseBatchNorm(rootNode, *rootNode->getChildren().begin());
     }
-}
\ No newline at end of file
+}
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/MatMulToFC.cpp
similarity index 60%
rename from src/recipes/FuseMulAdd.cpp
rename to src/recipes/MatMulToFC.cpp
index 6112fc47ece6bb361ebad626be7b5a6b1c2189bd..9b5addd3bb971b3f61980a582d4cce6435c57219 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/MatMulToFC.cpp
@@ -22,28 +22,29 @@
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/operator/MatMul.hpp"
+#include "aidge/graph/Matching.hpp"
 
-//Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
 
-
-void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<Aidge::Node> addNode) { //std::set<std::shared_ptr<Node>> nodes){
+void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<Aidge::Node> addNode) {
     // Fuse Mulmat & Add into FC
     // Inputs : old nodes (pointers on mul & add)
-
-    assert((matmulNode->type() == "MatMul" && addNode->type() == "Add") && "Wrong type for the nodes to replace");
+    AIDGE_ASSERT((matmulNode->type() == "MatMul" && (addNode == nullptr || addNode->type() == "Add")),
+        "Wrong type for the nodes to replace: {} and {}",
+        matmulNode->type(), (addNode) ? addNode->type() : "nullptr");
 
 
     // Step 1 : Create FC
     // Fetch the output dimension throught the bias size
     std::shared_ptr<Node> bias = nullptr;
-    if (addNode->getParent(0) == matmulNode) {
-        AIDGE_ASSERT(addNode->getParent(1), "No bias detected to produce the fuseMulAdd recipe.");
-        bias = addNode->getParent(1);
-    }
-    else if (addNode->getParent(1) == matmulNode) {
-        AIDGE_ASSERT(addNode->getParent(0), "No bias detected to produce the fuseMulAdd recipe.");
-        bias = addNode->getParent(0);
+    if (addNode) {
+        if (addNode->getParent(0) == matmulNode) {
+            AIDGE_ASSERT(addNode->getParent(1), "No bias detected to produce the matMulToFC recipe.");
+            bias = addNode->getParent(1);
+        }
+        else if (addNode->getParent(1) == matmulNode) {
+            AIDGE_ASSERT(addNode->getParent(0), "No bias detected to produce the matMulToFC recipe.");
+            bias = addNode->getParent(0);
+        }
     }
 
     std::shared_ptr<Node> weight = nullptr;
@@ -75,24 +76,9 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     }
     AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
 
-    // TODO: find another way to get OutChannels for FC operator.
-    // This poor fix supposes that one of Add inputs is a const and has the same outChannels as the output
-    DimSize_t outSize = 0;
-    AIDGE_ASSERT(addNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
-    const auto& op = std::static_pointer_cast<OperatorTensor>(addNode->getOperator());
-    for (size_t i = 0; i < op->nbInputs(); i++)
-    {
-        const auto& inTensor = op->getInput(i);
-        if(inTensor->nbDims() > 0) {
-            outSize = inTensor->dims()[inTensor->nbDims()-1];
-            break;
-        }
-    }
-    AIDGE_ASSERT(outSize, "Could not get output number of channels for FC operator.");
-
     // Instanciate FC
     std::string fcName = matmulNode->name();
-    if (!addNode->name().empty()) {
+    if (addNode && !addNode->name().empty()) {
         fcName += "_" + addNode->name();
     }
 
@@ -105,43 +91,26 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
         bias->cloneSharedOperators()->addChild(fc, 0, 2);
     }
 
-
     // Step 3 : Update all graphviews that contains at least one node to replace
         // Case 1 : If all nodes are in a graph view : delete old nodes & branch input & output
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
         // Maybe create a central mechanism to update automatically all graph views rather than each node have graphview presence memory?
-    auto newNodes = std::set<std::shared_ptr<Node>>({fc, fc->getParent(1), fc->getParent(2)});
-    GraphView::replace({matmulNode, addNode, bias, weight}, newNodes);
-
-}
-
-
-void Aidge::fuseMulAdd(std::shared_ptr<Aidge::MatchSolution> solution){
-
-    assert(solution->at("MatMul").size() == 1 && "Wrong number of nodes MatMul to replace\n");
-    assert(solution->at("Add").size() == 1 && "Wrong number of nodes Add to replace\n");
-
-    for (const auto& matmulNode : solution->at("MatMul")) {
-        for (const auto& addNode : solution->at("Add")) {
-            fuseMulAdd(matmulNode,addNode);
-        }
+    if (addNode) {
+        auto newNodes = std::set<std::shared_ptr<Node>>({fc, fc->getParent(1), fc->getParent(2)});
+        GraphView::replace({matmulNode, addNode, bias, weight}, newNodes);
+    }
+    else {
+        auto newNodes = std::set<std::shared_ptr<Node>>({fc, fc->getParent(1)});
+        GraphView::replace({matmulNode, weight}, newNodes);
     }
-}
-
-
-void Aidge::fuseMulAdd(std::shared_ptr<Aidge::GraphView> graphView){
-
-
-    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-    regex->setNodeKey("Add","getType($) =='Add'");
-    regex->setNodeKey("MatMul","getType($) =='MatMul'");
-    regex->addQuery("MatMul -> Add ;");
-
-    for (const auto& solution : regex->match(graphView)) {
-
-        fuseMulAdd(solution);
 
+}
 
+void Aidge::matMulToFC(std::shared_ptr<Aidge::GraphView> graphView){
+    const auto matches = SinglePassGraphMatching(graphView).match("MatMul->Add#?");
 
+    for (const auto& match : matches) {
+        const auto it = match.anchors.find("Add");
+        matMulToFC(match.graph->rootNode(), (it != match.anchors.end()) ? it->second.at("#") : nullptr);
     }
 }
diff --git a/src/recipes/RemoveFlatten.cpp b/src/recipes/RemoveFlatten.cpp
index 8c1bf1bcf0bf79fda275867ff6430d5a937da172..bf80ab51749953a5b72d0e01f186265fdbb72e81 100644
--- a/src/recipes/RemoveFlatten.cpp
+++ b/src/recipes/RemoveFlatten.cpp
@@ -17,38 +17,20 @@
 
 
 //Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
+// #include "aidge/graphRegex/GraphRegex.hpp"
+#include "aidge/graph/Matching.hpp"
 
 
 namespace Aidge {
-    void removeFlatten(std::shared_ptr<Node> flatten) {
-        GraphView::replace({flatten}, {});
-    }
-
-    void removeFlatten(std::shared_ptr<MatchSolution> solution){
-
-        assert(solution->at("FC").size() == 1 && "Wrong number of nodes FC to replace\n");
-        assert(solution->at("Flatten").size() == 1 && "Wrong number of nodes Flatten to replace\n");
-
-        for (const auto& flatten : solution->at("Flatten")) {
-            removeFlatten(flatten);
-        }
-    }
-
-
-
     void removeFlatten(std::shared_ptr<GraphView> graphView){
-      
-
-        std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-        regex->setNodeKey("Flatten","getType($) =='Flatten'");
-        regex->setNodeKey("FC","getType($) =='FC'");
-        regex->addQuery("Flatten->FC");
-
-        for (const auto& solution : regex->match(graphView)) {
-            removeFlatten(solution);
+        const auto matches = SinglePassGraphMatching(graphView).match(
+            "(FC|MatMul)<-(Flatten)+"
+        );
+
+        for (const auto& solution : matches) {
+            auto flattenNodes(solution.graph->getNodes());
+            flattenNodes.erase(solution.graph->rootNode());
+            GraphView::replace(flattenNodes, {});
         }
-
-
     }
 }
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 4bd470ac5d10a93b193bfa07544dd894dbe30a35..46968bafae14a1fc67119901e483f1cd6b49abb3 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -220,7 +220,7 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
             bool isProducer = false;
             for (IOIndex_t outId = 0; outId < consumer->nbOutputs(); ++outId) {
                 for (const auto& child : consumer->getChildren(outId)) {
-                    if (child) {
+                    if (child && mGraphView->inView(child)) {
                         IOIndex_t inputIdx = 0;
                         for (const auto& childParent : child->getParents()) {
                             if (childParent == consumer) {
@@ -527,23 +527,28 @@ void Aidge::Scheduler::connectInputs(const std::vector<std::shared_ptr<Aidge::Te
     // This version of connect inputs only connects tensor inputs in input data producers.
     auto inputNodes = mGraphView->getOrderedInputs();
 
-    // Assert that the number of input data producers corresponds to the number of data input
-    if (data.size() != inputNodes.size()) {
-        const std::map<std::shared_ptr<Node>, std::string> namePtrTable
-            = mGraphView->getRankedNodesName("{0} ({1}#{3})");
-
-        std::vector<std::pair<std::string, IOIndex_t>> inputNodesName;
-        std::transform(inputNodes.begin(), inputNodes.end(),
-            std::back_inserter(inputNodesName),
-            [&namePtrTable](auto val){ return std::make_pair(namePtrTable.at(val.first), val.second); });
-
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Provided {} inputs to the scheduler, but graph has {} inputs (required inputs in order: )",
-            data.size(), inputNodes.size(), inputNodesName);
-    }
-
-    for (std::size_t i = 0; i < data.size(); ++i){
-        // TODO : maybe shallow copy instead of deepcopy
-        inputNodes[i].first->getOperator()->setInput(inputNodes[i].second, data[i]);
+    std::size_t i = 0;
+    for (auto& input : inputNodes) {
+        if (i < data.size() && data[i]) {
+            // TODO : maybe shallow copy instead of deepcopy
+            input.first->getOperator()->setInput(input.second, data[i]);
+        }
+        else {
+            const auto& currentTensorPtr =
+                std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator())->getInput(input.second);
+            const bool optional = (input.first->inputCategory(input.second) == InputCategory::OptionalData
+                || input.first->inputCategory(input.second) == InputCategory::OptionalParam);
+
+            if (currentTensorPtr) {
+                Log::debug("connectInputs(): existing tensor dims are {} for graph input#{} for input#{} of node {} (of type {})",
+                    currentTensorPtr->dims(), i, input.second, input.first->name(), input.first->type());
+            }
+            else if (!optional) {
+                Log::warn("connectInputs(): did not specify tensor for mandatory graph input#{} for input#{} of node {} (of type {})",
+                    i, input.second, input.first->name(), input.first->type());
+            }
+        }
+        ++i;
     }
 }
 
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index d9289c4aa3f4b44ce72d772c9a39dd8e66ab09e7..30071248699dbc2dd697d1d1f09c47ebcb217967 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -108,13 +108,6 @@ TEST_CASE("clone_with_delete", "[GraphView][cloneDelete]") {
     const size_t nbTests = 100;
     size_t nbClonedWithDelete = 0;
 
-    // Note: initial seed is chosen such that for nbTests=100, the generated
-    // graphs keep the same inputs/outputs despites the deleted nodes
-    // (meaning the deleted nodes are not input/output of the graph).
-    // Otherwise, the last two REQUIRE are not garanteed to be true!
-    // Warning: distributions are not required to behave the same way by the standard,
-    // therefore the seed has to work for both GCC and MSVC...
-    // See https://stackoverflow.com/questions/38532927/why-gcc-and-msvc-stdnormal-distribution-are-different
     std::mt19937::result_type seed(243);
 
     for (int test = 0; test < nbTests; ++test) {
@@ -124,7 +117,21 @@ TEST_CASE("clone_with_delete", "[GraphView][cloneDelete]") {
         const auto g1 = std::make_shared<GraphView>("g1");
         const bool unicity1 = g1->add(randGraph.gen(seed, 10));
 
-        if (unicity1) {
+        bool stableInOut = true;
+        for (auto node : g1->inputNodes()) {
+            if (node->type() == "DelFictive") {
+                stableInOut = false;
+                break;
+            }
+        }
+        for (auto node : g1->outputNodes()) {
+            if (node->type() == "DelFictive") {
+                stableInOut = false;
+                break;
+            }
+        }
+
+        if (unicity1 && stableInOut) {
             randGraph.omitType = "DelFictive";
             const auto g2 = std::make_shared<GraphView>("g2");
             const bool unicity2 = g2->add(randGraph.gen(seed, 10));
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index 6abb4d37114d0952feb13c6cfbee66bd65dc5748..2fdcd611d378ceb6c3dbdc853920eecf92c31141 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -18,6 +18,8 @@
 #include "aidge/graph/Testing.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/Add.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/FC.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -27,7 +29,7 @@
 using namespace Aidge;
 
 void checkMatches(const std::set<SinglePassGraphMatching::MatchingResult>& results, const std::map<std::string, std::set<std::string>>& expected) {
-    REQUIRE(results.size() == expected.size());
+    CHECK(results.size() == expected.size());
 
     for (const auto& result : results) {
         const auto found = nodePtrTo(result.graph->getNodes(), nodePtrToName);
@@ -347,6 +349,94 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
+    auto g2 = Sequential({
+        Producer({16, 3, 512, 512}, "dataProvider"),
+        Conv(3, 4, {5, 5}, "conv1"),
+        BatchNorm<2>(4, 1.0e-5, 0.1, "bn1"),
+        Conv(4, 4, {5, 5}, "conv2"),
+        ReLU("relu2"),
+        Conv(4, 4, {5, 5}, "conv3"),
+        BatchNorm<2>(4, 1.0e-5, 0.1, "bn3"),
+        FC(4, 4, false, "fc1"),
+        FC(4, 4, false, "fc2"),
+        FC(4, 4, false, "fc3"),
+        ReLU("relu3"),
+        Conv(1, 4, {5, 5}, "conv4")
+    });
+
+    SECTION("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exBN", [](const NodePtr& node) {
+            return (node->type() != "BatchNorm");
+        });
+        gm.addNodeLambda("exFC", [](const NodePtr& node) {
+            return (node->type() != "FC");
+        });
+
+        const auto results = gm.match("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))");
+
+        checkMatches(results, {
+            {"conv2", {"conv2", "relu2"}},
+            {"conv4", {"conv4"}},
+            {"fc3", {"fc3", "relu3", "conv4"}}
+        });
+    }
+
+    // Find last node of a type
+    SECTION("FC#->(.[exFC])*->$") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exFC", [](const NodePtr& node) {
+            return (node->type() != "FC");
+        });
+
+        const auto results = gm.match("FC#->(.[exFC])*->$");
+
+        checkMatches(results, {
+            {"fc3", {"fc3", "relu3", "conv4"}}
+        });
+    }
+
+    SECTION("Conv#->(.[exConv])*->$") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exConv", [](const NodePtr& node) {
+            return (node->type() != "Conv");
+        });
+
+        const auto results = gm.match("Conv#->(.[exConv])*->$");
+
+        checkMatches(results, {
+            {"conv4", {"conv4"}}
+        });
+    }
+
+    // Find first node of a type
+    SECTION("FC#<-(.[exFC])*<-$") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exFC", [](const NodePtr& node) {
+            return (node->type() != "FC");
+        });
+
+        const auto results = gm.match("FC#<-(.[exFC])*<-$");
+
+        checkMatches(results, {
+            {"fc1", {"fc1", "bn3", "conv3", "relu2", "conv2", "bn1", "conv1", "dataProvider"}}
+        });
+    }
+
+    SECTION("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exParam", [](const NodePtr& node) {
+            return (node->type() != "FC" && node->type() != "Conv");
+        });
+
+        const auto results = gm.match("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv1_w", "dataProvider"}},
+            {"conv4", {"conv4", "conv4_w"}}
+        });
+    }
+
     SECTION("Conv->ReLU [perf]") {
         const size_t nbTests = 3;
         std::mt19937::result_type seed(1);
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index fbbc3f766857f15af0da8004c35078993d71e973..68ac509e79e347106a9a132249f125ebe6e39f6a 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -175,27 +175,17 @@ TEST_CASE("GraphRegexUser") {
         matmul1->addChild(add1, 0, 0);
         b1->addChild(add1, 0, 1);
 
-        auto fc = GenericOperator("FC", 1, 0, 1, "c");
-        auto fl = GenericOperator("Flatten", 1, 0, 1, "c");
-
-
+        auto fc = GenericOperator("FC", 1, 0, 1, "fc1");
+        auto fl = GenericOperator("Flatten", 1, 0, 1, "flatten0");
+        add1->addChild(fl, 0, 0);
+        fl->addChild(fc, 0, 0);
         auto g = std::make_shared<GraphView>();
-        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc});
-
-        std::shared_ptr<GraphRegex> kitchenBook = std::make_shared<GraphRegex>();
-
-        kitchenBook->setNodeKey("Add","getType($) =='Add'");
-        kitchenBook->setNodeKey("MatMul","getType($) =='MatMul'");
-        kitchenBook->setNodeKey("Flatten","getType($) =='Flatten'");
-        kitchenBook->setNodeKey("FC","getType($) =='FC'");
-
-        kitchenBook->addQuery("MatMul->Add",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(fuseMulAdd));
-        kitchenBook->addQuery("Flatten->FC",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(removeFlatten));
-
-        kitchenBook->appliedRecipes(g);
+        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1, fl, fc});
 
+        matMulToFC(g);
+        removeFlatten(g);
         std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
-        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fc}));
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc}));
 	    //REQUIRE(newNodes.size() == 6);
 
 
diff --git a/unit_tests/operator/Test_DepthToSpaceImpl.cpp b/unit_tests/operator/Test_DepthToSpaceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..62f760ce8b3942ab3101ff5e1324307a46048b91
--- /dev/null
+++ b/unit_tests/operator/Test_DepthToSpaceImpl.cpp
@@ -0,0 +1,87 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/DepthToSpace.hpp"
+
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] DepthToSpace_Op", "[DepthToSpace][forwardDims]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+
+    SECTION("Nb dimensions") {
+        // Create DepthToSpace operator with block_size of 1 compatible with any size
+        std::shared_ptr<Node> myDTS = DepthToSpace(1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myDTS -> getOperator());
+
+        SECTION("Scalar") {
+            // input_0
+            std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(9);
+            op -> associateInput(0,T0);
+            REQUIRE_THROWS(op->forwardDims());
+        }
+        SECTION("+1-D") {
+            // input_0
+            std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+            op -> associateInput(0,T0);
+
+            for (std::uint16_t nb_dims = 0; nb_dims < 6; ++nb_dims) {
+
+                std::vector<std::size_t> dims0(nb_dims);
+                for (std::size_t i = 0; i < nb_dims; ++i) {
+                    dims0[i] = dimsDist(gen);
+                }
+                T0->resize(dims0);
+                if (nb_dims == 4) {
+                    REQUIRE_NOTHROW(op->forwardDims());
+                } else {
+                    REQUIRE_THROWS(op->forwardDims());
+                }
+            }
+        }
+    }
+
+    SECTION("Propagation") {
+        // input_0 with 4-D in NCHW format
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(std::vector<DimSize_t>({1, 16, 100, 100}));
+
+        DepthToSpace_Op myDTS_should_throw = DepthToSpace_Op(7);
+        myDTS_should_throw.associateInput(0,T0);
+
+        REQUIRE_THROWS(myDTS_should_throw.forwardDims());
+
+        DepthToSpace_Op myDTS_should_not_throw = DepthToSpace_Op(4);
+        myDTS_should_not_throw.associateInput(0,T0);
+
+        REQUIRE_NOTHROW(myDTS_should_not_throw.forwardDims());
+        REQUIRE(myDTS_should_not_throw.getOutput(0)->dims() == std::vector<std::size_t>({1,1,400,400}));
+    }
+}
+
+TEST_CASE("[core/operator] DepthToSpace_Op impl", "[DepthToSpace][forward]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+
+
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_GridSample_Op.cpp b/unit_tests/operator/Test_GridSample_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ae38ec7083a0df49fb241509bf52895765ddb0e8
--- /dev/null
+++ b/unit_tests/operator/Test_GridSample_Op.cpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/GridSample.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] GridSample_Op(forwardDims)", "[GridSample][forwardDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    auto rd = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create GridSample Operator
+    std::shared_ptr<Node> myGridSample = GridSample(GridSample_Op::Mode::Cubic, GridSample_Op::PaddingMode::Border, false);
+    auto op = std::static_pointer_cast<OperatorTensor>(myGridSample -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> data_in0 = std::make_shared<Tensor>();
+    op -> associateInput(0,data_in0);
+    // input_1
+    std::shared_ptr<Tensor> grid_in1 = std::make_shared<Tensor>();
+    op -> associateInput(1,grid_in1);
+
+    SECTION("Valid shape provided") {
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            std::size_t N = dimsDist(gen);
+            std::size_t C = dimsDist(gen);
+            std::size_t H_data_in0 = dimsDist(gen);
+            std::size_t W_data_in0 = dimsDist(gen);
+            std::size_t H_grid_in1 = dimsDist(gen);
+            std::size_t W_grid_in1 = dimsDist(gen);
+
+            data_in0->resize({N, C, H_data_in0, W_data_in0});
+            grid_in1->resize({N, H_grid_in1, W_grid_in1, 2});
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == std::vector<std::size_t>({N, C, H_grid_in1, W_grid_in1}));
+        }
+    }
+    SECTION("Invalid shape provided") {
+        std::size_t N_in = dimsDist(gen);
+        std::size_t C = dimsDist(gen);
+        std::size_t H_data_in0 = dimsDist(gen);
+        std::size_t W_data_in0 = dimsDist(gen);
+        std::size_t H_grid_in1 = dimsDist(gen);
+        std::size_t W_grid_in1 = dimsDist(gen);
+
+        // different batch number
+        std::size_t N_out = N_in+1;
+        data_in0->resize({N_in, C, H_data_in0, W_data_in0});
+        grid_in1->resize({N_out, H_grid_in1, W_grid_in1, 2});
+        REQUIRE_THROWS(op->forwardDims());
+
+        // different number of dimensions
+        data_in0->resize({N_in, C, H_data_in0, W_data_in0});
+        grid_in1->resize({N_in, H_grid_in1, W_grid_in1, 2, 2});
+        REQUIRE_THROWS(op->forwardDims());
+
+        // wrong number of pixel coordinates
+        data_in0->resize({N_in, C, H_data_in0, W_data_in0});
+        grid_in1->resize({N_in, H_grid_in1, W_grid_in1, 2 + dimsDist(gen)});
+        REQUIRE_THROWS(op->forwardDims());
+    }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/recipes/Test_FuseMulAdd.cpp b/unit_tests/recipes/Test_FuseMulAdd.cpp
deleted file mode 100644
index 9ea151039f07e5c688572d61b746d8fc26f1c3fe..0000000000000000000000000000000000000000
--- a/unit_tests/recipes/Test_FuseMulAdd.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <catch2/catch_test_macros.hpp>
-#include <set>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/FC.hpp"
-#include "aidge/operator/MatMul.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/recipes/Recipes.hpp"
-
-namespace Aidge {
-
-
-TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
-    // generate the original GraphView
-    auto matmul0 = MatMul("matmul0");
-    auto add0 = Add(2, "add0");
-    auto matmul1 = MatMul("matmul1");
-    auto add1 = Add(2, "add1");
-
-    auto b0 = Producer({5}, "B0");
-    auto w0 = Producer({5, 5}, "W0");
-    auto b1 = Producer({5}, "B1");
-    auto w1 = Producer({5,5},"W1");
-    auto input = Producer({2,5}, "input");
-
-    input->addChild(matmul0, 0, 0);
-    w0->addChild(matmul0, 0, 1);
-
-    matmul0->addChild(add0, 0, 0);
-    b0->addChild(add0, 0, 1);
-
-    add0->addChild(matmul1, 0, 1);
-    w1->addChild(matmul1, 0, 0);
-
-    matmul1->addChild(add1, 0, 0);
-    b1->addChild(add1, 0, 1);
-
-    auto g = std::make_shared<GraphView>();
-    g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1});
-
-    // Check original graph
-    REQUIRE(g->getNodes() ==
-            std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
-    REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
-    REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
-    REQUIRE(((matmul1->getParent(1) == add0) && (matmul1->getParent(0) == w1)));
-    REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
-
-	// Transform GraphView inplace
-    fuseMulAdd(g);
-
-	// Check new GraphView
-	std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
-	REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
-	REQUIRE(newNodes.size() == 6);
-	for (const auto& node : newNodes) {
-		REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
-	}
-}
-
-}  // namespace Aidge
diff --git a/unit_tests/recipes/Test_MatMulToFC.cpp b/unit_tests/recipes/Test_MatMulToFC.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2adf882ca69e0d5ca5f050d1b89cfb09d81b536b
--- /dev/null
+++ b/unit_tests/recipes/Test_MatMulToFC.cpp
@@ -0,0 +1,118 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+namespace Aidge {
+
+
+TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") {
+    SECTION("with Add") {
+        // generate the original GraphView
+        auto matmul0 = MatMul("matmul0");
+        auto add0 = Add(2, "add0");
+        auto matmul1 = MatMul("matmul1");
+        auto add1 = Add(2, "add1");
+
+        auto b0 = Producer({5}, "B0");
+        auto w0 = Producer({5, 5}, "W0");
+        auto b1 = Producer({5}, "B1");
+        auto w1 = Producer({5,5},"W1");
+        auto input = Producer({2,5}, "input");
+
+        input->addChild(matmul0, 0, 0);
+        w0->addChild(matmul0, 0, 1);
+
+        matmul0->addChild(add0, 0, 0);
+        b0->addChild(add0, 0, 1);
+
+        add0->addChild(matmul1, 0, 1);
+        w1->addChild(matmul1, 0, 0);
+
+        matmul1->addChild(add1, 0, 0);
+        b1->addChild(add1, 0, 1);
+
+        auto g = std::make_shared<GraphView>();
+        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1});
+
+        // Check original graph
+        REQUIRE(g->getNodes() ==
+                std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+        REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
+        REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
+        REQUIRE(((matmul1->getParent(1) == add0) && (matmul1->getParent(0) == w1)));
+        REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
+
+        // Transform GraphView inplace
+        matMulToFC(g);
+
+        // Check new GraphView
+        std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+        REQUIRE(newNodes.size() == 6);
+        for (const auto& node : newNodes) {
+            REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
+        }
+    }
+
+    SECTION("without Add") {
+        // generate the original GraphView
+        auto matmul0 = MatMul("matmul0");
+        auto matmul1 = MatMul("matmul1");
+        auto add1 = Add(2, "add1");
+
+        auto w0 = Producer({5, 5}, "W0");
+        auto b1 = Producer({5}, "B1");
+        auto w1 = Producer({5,5},"W1");
+        auto input = Producer({2,5}, "input");
+
+        input->addChild(matmul0, 0, 0);
+        w0->addChild(matmul0, 0, 1);
+
+        matmul0->addChild(matmul1, 0, 1);
+        w1->addChild(matmul1, 0, 0);
+
+        matmul1->addChild(add1, 0, 0);
+        b1->addChild(add1, 0, 1);
+
+        auto g = std::make_shared<GraphView>();
+        g->add({w0, matmul0, w1, matmul1, b1, add1});
+
+        // Check original graph
+        REQUIRE(g->getNodes() ==
+                std::set<std::shared_ptr<Node>>({w0, matmul0, w1, matmul1, b1, add1}));
+        REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
+        REQUIRE(((matmul1->getParent(1) == matmul0) && (matmul1->getParent(0) == w1)));
+        REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
+
+        // Transform GraphView inplace
+        matMulToFC(g);
+
+        // Check new GraphView
+        std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, w1, matmul1, b1, add1}));
+        REQUIRE(newNodes.size() == 5);
+        for (const auto& node : newNodes) {
+            REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
+        }
+    }
+}
+
+}  // namespace Aidge
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index 24f5aa2e231b5204add1c8f87cdeb7a71175ea05..c3b4c08d98115c9f081bbbf8cb677114b66c545a 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -42,7 +42,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
 
     CHECK(g->getOrderedInputs().size() == 1);
     CHECK(g->getOrderedInputs()[0].first == fc0);
-    
+
     CHECK(fc0->getParent(0) == nullptr);
     CHECK(fc0->getChildren(0).size() == 1);
     CHECK(g->rootNode() == fc0);
@@ -54,10 +54,10 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
 
     CHECK(g->getOrderedInputs().size() == 1);
     CHECK(g->getOrderedInputs()[0].first == fc0);
-    
+
     CHECK(g->getOrderedOutputs().size() == 1);
     CHECK(g->getOrderedOutputs()[0].first == fc0);
-    
+
     CHECK(fc0->getParent(0) == nullptr);
     CHECK(fc0->getChildren(0).size() == 0);
     CHECK(g->rootNode() == fc0);
@@ -73,7 +73,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
 
     CHECK(g->getOrderedOutputs().size() == 1);
     CHECK(g->getOrderedOutputs()[0].first == fc1);
-    
+
     CHECK(fc1->getParent(0) == fc0);
     CHECK(fc0->getChildren(0)[0] == fc1);
     CHECK(g->rootNode() == fc0);
@@ -87,10 +87,10 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
     removeFlatten(g);
 
     CHECK(g->getOrderedInputs().size() == 0);
-    
+
     CHECK(g->getOrderedOutputs().size() == 1);
     CHECK(g->getOrderedOutputs()[0].first == fc0);
-    
+
     CHECK(fc0->getParent(0) == prod);
     CHECK(fc0->getChildren(0).size() == 0);