diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 2115b660fa38d3d077eaa9c416525a23c1d4c536..4f7079e59c4328885969e7dc7181395d1333d0af 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -55,7 +55,7 @@ public:
         T* dstT = static_cast<T *>(rawPtr(offset));
 
         AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "TensorImpl_cpu<{}>::copy(): overlapping copy is not supported", typeid(T).name());
-        std::copy(srcT, srcT + length, dstT);
+        std::copy_n(srcT, length, dstT);
     }
 
     void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final;
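Context for the change above: std::copy_n takes the element count directly, which matches the existing `length` parameter and avoids restating `srcT + length`. A minimal standalone sketch of the same copy-with-overlap-check pattern (names are illustrative, not Aidge API):

#include <algorithm>  // std::copy_n
#include <cassert>
#include <cstddef>

template <typename T>
void copyNoOverlap(const T* src, T* dst, std::size_t length) {
    // std::copy_n, like std::copy, requires non-overlapping ranges.
    assert(dst < src || dst >= src + length);
    std::copy_n(src, length, dst);  // same result as std::copy(src, src + length, dst)
}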
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 627a5a4784b4e6546cdfc96b65acbe2a39ee119c..fdeef2a8e20c2cd04ad31ae18a0f9b1befd5373b 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -112,7 +112,7 @@ class Tensor : public Data,
      * @tparam T datatype
      */
     template <typename T>
-    constexpr Tensor(Vector<T> &&arr)
+    Tensor(Vector<T> &&arr)
         : Data(Type),
           mDataType(NativeType<T>::type),
           mDims({arr.data.size()}),
@@ -204,13 +204,13 @@ class Tensor : public Data,
      * Tensor and the initial one.
      * @param other
      */
-    Tensor(const Tensor& other) = default;
+    Tensor(const Tensor& other);
 
     /**
      * @brief Move constructor.
      * @param other
      */
-    Tensor(Tensor&& other) = default;
+    Tensor(Tensor&& other);
 
     /**
      * @brief Copy dimensions, datatype and data from another Tensor.
@@ -219,8 +219,8 @@ class Tensor : public Data,
      * @param other other Tensor object.
      * @return Tensor&
      */
-    Tensor &operator=(const Tensor& other) = default;
-    Tensor &operator=(Tensor&& other) = default;
+    Tensor &operator=(const Tensor& other);
+    Tensor &operator=(Tensor&& other);
 
     template <typename T>
     constexpr Tensor &operator=(Vector<T> &&arr) {
diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp
index 599ca7d6defd729b6e6536dcc95f326d345701d9..ec59e1b38c10cbe53eb667b724991ea8e5427a6e 100644
--- a/include/aidge/graph/Connector.hpp
+++ b/include/aidge/graph/Connector.hpp
@@ -11,10 +11,10 @@
 #ifndef AIDGE_CORE_GRAPH_CONNECTOR_H_
 #define AIDGE_CORE_GRAPH_CONNECTOR_H_
 
-#include <cassert>
 #include <memory>
 #include <vector>
 
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -55,7 +55,7 @@ class Connector {
 
    public:
     Connector operator[](IOIndex_t index) {
-        assert((size() > 1) && "Cannot refer a slice of the output.");
+        AIDGE_ASSERT((size() > 1), "Cannot refer a slice of the output.");
         return Connector(mNode, index);
     }
 
@@ -68,7 +68,7 @@ class Connector {
 
    private:
     Connector(std::shared_ptr<Node> node, IOIndex_t index) : mNode(node) {
-        assert((index != gk_IODefaultIndex) && (index < size()) &&
+        AIDGE_ASSERT((index != gk_IODefaultIndex) && (index < size()),
                "Non-valid output index.\n");
         mOutputId = index;
     }
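For readers unfamiliar with the macro replacing assert() here: a formatting assertion built on fmt typically has the shape sketched below. This is an assumption about the general form only; the real AIDGE_ASSERT lives in aidge/utils/ErrorHandling.hpp and may differ (e.g. it may log before throwing):

// Hypothetical sketch only; not the actual AIDGE_ASSERT definition.
#include <stdexcept>
#include <fmt/format.h>

#define SKETCH_ASSERT(cond, ...)                                    \
    do {                                                            \
        if (!(cond)) {                                              \
            throw std::runtime_error(fmt::format(__VA_ARGS__));     \
        }                                                           \
    } while (false)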
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 76f5dcdfc28e90a3f83435841af21048bcb2a9c0..e122aa446bde05abdce2a1fe0899c1fec52e4dba 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -424,21 +424,7 @@ public:
         addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
     }
 
-    inline void updateNodeName(const std::shared_ptr<Node>& node, const std::string& newName){
-        if (!newName.empty()) {
-            auto itNew = mNodeRegistry.insert(std::make_pair(newName, node));
-            if (!itNew.second) {
-                Log::notice("Replacing existing node name in graph node name registry: {}", newName);
-                (itNew.first)->second = node;
-            }
-        }
-
-        if (!node->name().empty()) {
-            const auto it = mNodeRegistry.find(node->name());
-            AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", node->name(), name());
-            mNodeRegistry.erase(it);
-        }
-    }
+    void updateNodeName(const std::shared_ptr<Node>& node, const std::string& newName);
 
     /**
      * @brief Include a GraphView content in the current GraphView and link
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index 3b0874580b112f4c219886a78677e6c9801b72b8..c8de86e90989a6313f47b1b06dea401d5ebd6600 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -12,9 +12,12 @@
 #ifndef AIDGE_CORE_GRAPH_MATCHING_H_
 #define AIDGE_CORE_GRAPH_MATCHING_H_
 
+#include <functional>
 #include <map>
 #include <memory>
 #include <set>
+#include <string>
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
@@ -43,10 +46,10 @@ public:
         bool singleOutput = true;
         IOIndex_t edgeLeftIdx = 0;
         IOIndex_t edgeRightIdx = 0;
-        NodePtr startNode;
+        std::shared_ptr<Node> startNode;
 
         // For check & debug purpose:
-        size_t depth = 0;
+        std::size_t depth = 0;
         std::set<std::string> anchors;
     };
 
@@ -56,8 +59,8 @@ public:
         // We use graph->rootNode() as the std::set key, which is guaranteed
         // to never change after insertion!
         mutable std::shared_ptr<GraphView> graph;
-        mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
-        mutable NodePtr startNode;
+        mutable std::map<std::string, std::map<std::string, std::shared_ptr<Node>>> anchors;
+        mutable std::shared_ptr<Node> startNode;
 
         MatchingResult();
 
@@ -66,11 +69,14 @@ public:
         ~MatchingResult() noexcept;
     };
 
+    SinglePassGraphMatching() = delete;
     SinglePassGraphMatching(std::shared_ptr<GraphView> graph) : mGraph(graph) {}
     SinglePassGraphMatching(const SinglePassGraphMatching& other);
-    SinglePassGraphMatching& operator=(const SinglePassGraphMatching& other);
+
     ~SinglePassGraphMatching() noexcept;
 
+    SinglePassGraphMatching& operator=(const SinglePassGraphMatching& other);
+
     /**
      * Matches a query by direct, single pass parse and match.
      * The returned matches are non-ordered and therefore stored in a std::set.
@@ -141,26 +147,26 @@ public:
 
     /**
      * @brief Same as match() but with a mandatory start node.
-     * 
+     *
      * @param startNode Mandatory start node for the query.
      * @param query The query to search.
      * @return MatchingResult MatchingResult struct, with empty graph if query
      * is not found, or the graph corresponding to the query.
      */
-    MatchingResult matchFrom(NodePtr startNode, const std::string& query);
+    MatchingResult matchFrom(std::shared_ptr<Node> startNode, const std::string& query);
 
     /**
      * Filter to keep only the longest disjoint (non-overlapping) matches.
     */
     std::set<MatchingResult> filterLonguestDisjoint(const std::set<MatchingResult>& matches);
 
-    inline void addNodeLambda(const std::string& name, std::function<bool(const NodePtr&)> func) {
+    inline void addNodeLambda(const std::string& name, std::function<bool(const std::shared_ptr<Node>&)> func) {
         mLambda[name] = func;
     }
 
 private:
     std::shared_ptr<GraphView> mGraph;
-    std::map<std::string, std::function<bool(const NodePtr&)>> mLambda;
+    std::map<std::string, std::function<bool(const std::shared_ptr<Node>&)>> mLambda;
 
     /**
      * QUANTIFIER = '?' | '*' | '+' | ('{' [0-9]+ '}')
@@ -205,13 +211,6 @@ private:
     */
     bool matchNode(Context& ctx, std::set<MatchingResult>& matches);
 
-    inline void removeWhiteSpace(std::string& str) {
-        str.erase(str.begin(),
-            std::find_if(str.begin(),
-                        str.end(),
-                        [](char c) { return !std::isspace(c); }));
-    }
-
     struct CompareMatchingResultSize {
         bool operator()(const MatchingResult& lhs, const MatchingResult& rhs) const {
             // Some matches size could be the same
@@ -225,10 +224,8 @@ private:
     };
 };
 
-inline bool operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, const Aidge::SinglePassGraphMatching::MatchingResult& rhs) {
-    // Matches rootNode are guaranteed to be different!
-    return lhs.graph->rootNode() < rhs.graph->rootNode();
-}
+bool operator<(const SinglePassGraphMatching::MatchingResult& lhs, const SinglePassGraphMatching::MatchingResult& rhs);
+
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_GRAPH_MATCHING_H_ */
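Illustrative usage of the class declared above, assuming match() returns std::set<MatchingResult> for a textual query (the header documents matchFrom() as "same as match() but with a mandatory start node"); the "Conv->ReLU" query string is an assumption about the grammar parsed by the private match* methods:

// Illustrative sketch; the query string is an assumption.
#include <memory>
#include "aidge/graph/Matching.hpp"

void inspect(std::shared_ptr<Aidge::GraphView> graph) {
    Aidge::SinglePassGraphMatching matching(graph);
    // Register a named predicate that queries can refer to.
    matching.addNodeLambda("named", [](const std::shared_ptr<Aidge::Node>& n) {
        return !n->name().empty();
    });
    for (const auto& result : matching.match("Conv->ReLU")) {
        // Each result carries the matched sub-graph; root nodes are unique.
        const auto root = result.graph->rootNode();
        (void)root;
    }
}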
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index a16bbd63ecf52e8c97d5032c5c90a5f69186f995..a57ccc91f48ca3285eb8be6ff85a1dbb4aef6d52 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -12,13 +12,13 @@
 #ifndef AIDGE_CORE_GRAPH_NODE_H_
 #define AIDGE_CORE_GRAPH_NODE_H_
 
-#include <cassert>
+#include <deque>
+#include <functional>
 #include <memory>
 #include <set>
 #include <string>
 #include <vector>
-#include <deque>
-#include <utility>
+#include <utility>     // std::pair
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -27,7 +27,9 @@
 
 #include "aidge/graph/Connector.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 #ifdef PYBIND
 namespace py = pybind11;
@@ -131,7 +133,7 @@ public:
    * @brief Name of the Node.
    * @return std::string
    */
-  inline std::string name() const noexcept { return mAttrs->getAttr<std::string>("name"); }
+  std::string name() const noexcept { return mAttrs->getAttr<std::string>("name"); }
 
   /**
    * @brief Set the Node name.
@@ -175,7 +177,7 @@ public:
    * @brief Get the Operator object of the Node.
    * @return std::shared_ptr<Operator>
    */
-  inline std::shared_ptr<Operator> getOperator() const { return (*mOperator)(mAttrs); }
+  inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
 //   inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
 
   ///////////////////////////////////////////////////////
@@ -212,7 +214,7 @@ public:
    * @return std::pair<std::shared_ptr<Node>, IOIndex_t>
    */
   inline std::pair<NodePtr, IOIndex_t> input(const IOIndex_t inID) const {
-    assert((inID != gk_IODefaultIndex) && (inID < nbInputs()) && "Input index out of bound.");
+    AIDGE_ASSERT((inID != gk_IODefaultIndex) && (inID < nbInputs()), "Input index out of bound.");
     return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
   }
 
@@ -261,7 +263,7 @@ public:
    * @details [data, data, weight, bias] => 4
    * @return IOIndex_t
    */
-  inline IOIndex_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
+  inline IOIndex_t nbInputs() const noexcept { return mOperator->nbInputs(); }
 
   /**
    * @brief Category of a specific input (Data or Param, optional or not).
@@ -269,7 +271,7 @@ public:
    * @return InputCategory
    */
   inline InputCategory inputCategory(IOIndex_t idx) const {
-    return getOperator()->inputCategory(idx);
+    return mOperator->inputCategory(idx);
   }
 
   /**
@@ -279,7 +281,7 @@ public:
    * @return true if the operator defines it as a back edge
    */
   inline bool parentIsBackEdge(IOIndex_t idx) const {
-    return getOperator()->isBackEdge(idx);
+    return mOperator->isBackEdge(idx);
   }
 
   /**
@@ -292,7 +294,7 @@ public:
    * @brief Getter for the number of Output Tensors of the Node.
    * @return IOIndex_t
    */
-  inline IOIndex_t nbOutputs() const noexcept { return getOperator()->nbOutputs(); }
+  inline IOIndex_t nbOutputs() const noexcept { return mOperator->nbOutputs(); }
 
   IOIndex_t nbValidOutputs() const;
 
@@ -304,15 +306,7 @@ public:
    * @brief Set of pointers to each GraphView containing this Node
    * @return std::set<GraphView>
    */
-  inline std::set<std::shared_ptr<GraphView>> views() const noexcept {
-    std::set<std::shared_ptr<GraphView>> res;
-    for (const auto &v : mViews) {
-      if (auto p = v.lock()) {
-        res.insert(p);
-      }
-    }
-    return res;
-  }
+  std::set<std::shared_ptr<GraphView>> views() const noexcept;
 
   /**
    * @brief Add a GraphView pointer to the list of GraphView containing
@@ -323,7 +317,7 @@ public:
     mViews.insert(std::weak_ptr<GraphView>(graphPtr));
   }
 
-  inline void removeView(const std::shared_ptr<GraphView> &graphPtr) {
+  void removeView(const std::shared_ptr<GraphView> &graphPtr) {
     mViews.erase(graphPtr);
   }
 
@@ -368,7 +362,6 @@ public:
    * @return std::shared_ptr<Node>&
    */
   inline NodePtr &getParent(const IOIndex_t inId) {
-    assert(inId != gk_IODefaultIndex);
     return mParents.at(inId);
   }
 
diff --git a/include/aidge/graph/StaticAnalysis.hpp b/include/aidge/graph/StaticAnalysis.hpp
index d3fe681749eeb69e4816a38f302d510f1c81381a..cc5532224ebd00f17aefbf5c2620a3ef15cfaa2a 100644
--- a/include/aidge/graph/StaticAnalysis.hpp
+++ b/include/aidge/graph/StaticAnalysis.hpp
@@ -13,13 +13,12 @@
 #ifndef AIDGE_CORE_GRAPH_STATICANALYSIS_H_
 #define AIDGE_CORE_GRAPH_STATICANALYSIS_H_
 
+#include <cstddef>  // std::size_t
 #include <memory>
+#include <string>
 
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/graph/GraphView.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-
+#include "aidge/graph/GraphView.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/Conv.hpp"
@@ -27,125 +26,131 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/ReduceSum.hpp"
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/MetaOperator.hpp"
+#include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
 /**
  * @brief Base class to compute statistics from an Operator.
- * 
+ *
  */
 class OperatorStats : public Registrable<OperatorStats, std::string, std::function<std::shared_ptr<OperatorStats>(const Operator&)>> {
 public:
+    OperatorStats() = delete;
     OperatorStats(const Operator& op);
-    const Operator& getOperator() const noexcept { return mOp; }
+
+    virtual ~OperatorStats();
+
+    inline const Operator& getOperator() const noexcept { return mOp; }
 
     /**
-     * @brief Get the worst case total number of arithmetic operations for the 
+     * @brief Get the worst case total number of arithmetic operations for the
      * operator data flow. This includes base arithmetic operations: +, -, / and *.
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
      * Example of Operator with only arithmetic operations: Conv.
-     * 
-     * @return size_t Number of arithmetic operations.
+     *
+     * @return std::size_t Number of arithmetic operations.
      */
-    virtual size_t getNbArithmOps() const { return 2 * getNbMACOps(); };
+    virtual std::size_t getNbArithmOps() const { return 2 * getNbMACOps(); };
 
     /**
-     * @brief Get the worst case total number of logic operations for the 
+     * @brief Get the worst case total number of logic operations for the
      * operator data flow. This includes operations like logical shift, or, and...
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
      * Example of Operator with only logic operations: BitShift.
-     * 
-     * @return size_t Number of logic operations.
+     *
+     * @return std::size_t Number of logic operations.
      */
-    virtual size_t getNbLogicOps() const { return 0; };
+    virtual std::size_t getNbLogicOps() const { return 0; };
 
     /**
-     * @brief Get the worst case total number of comparison operations for the 
+     * @brief Get the worst case total number of comparison operations for the
      * operator data flow. This includes operations like <, >, =...
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
      * Example of Operator with only comparison operations: MaxPool.
-     * 
-     * @return size_t Number of comparison operations.
+     *
+     * @return std::size_t Number of comparison operations.
      */
-    virtual size_t getNbCompOps() const { return 0; };
+    virtual std::size_t getNbCompOps() const { return 0; };
 
     /**
      * @brief Get the worst case total number of non-linear (NL) operations for the
      * operator data flow. This includes operations like calls to tanh(), erf(), cos()...
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
      * Example of Operator with only NL operations: Tanh.
      * Non-linear operations are necessarily of floating-point type.
-     * 
-     * @return size_t Number of non-linear (NL) operations.
+     *
+     * @return std::size_t Number of non-linear (NL) operations.
      */
-    virtual size_t getNbNLOps() const { return 0; };
+    virtual std::size_t getNbNLOps() const { return 0; };
 
     /**
      * @brief Get the worst case total number of operations for the operator data flow.
      * Total number of operations = arithmetic ops + logic ops + comp ops + NL ops.
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
-     * 
-     * @return size_t Number of operations.
+     *
+     * @return std::size_t Number of operations.
      */
-    size_t getNbOps() const { return getNbArithmOps() + getNbLogicOps() + getNbCompOps() + getNbNLOps(); };
+    std::size_t getNbOps() const { return getNbArithmOps() + getNbLogicOps() + getNbCompOps() + getNbNLOps(); };
 
     /**
      * @brief Get the worst case total number of INT arithmetic operations for
      * the operator data flow.
      * Such that getNbArithmOps() = getNbArithmIntOps() + getNbArithmFpOps()
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
-     * 
-     * @return size_t Number of INT arithmetic operations.
+     *
+     * @return std::size_t Number of INT arithmetic operations.
      */
-    virtual size_t getNbArithmIntOps() const;
+    virtual std::size_t getNbArithmIntOps() const;
 
     /**
-     * @brief Get the worst case total number of FP arithmetic operations for 
+     * @brief Get the worst case total number of FP arithmetic operations for
      * the operator data flow.
      * Such that getNbArithmOps() = getNbArithmIntOps() + getNbArithmFpOps()
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
-     * 
-     * @return size_t Number of FP arithmetic operations.
+     *
+     * @return std::size_t Number of FP arithmetic operations.
      */
-    size_t getNbArithmFpOps() const { return getNbArithmOps() - getNbArithmIntOps(); };
+    std::size_t getNbArithmFpOps() const { return getNbArithmOps() - getNbArithmIntOps(); };
 
     /**
      * @brief Get the worst case total number of MAC operations for the operator
-     * data flow. MAC operations are included in getNbArithmOps(), with 1 MAC 
+     * data flow. MAC operations are included in getNbArithmOps(), with 1 MAC
     * operation counted as 2 arithmetic operations. MAC can be INT or FP.
-     * Control flow operations (loop counters, index computation...) and memory 
+     * Control flow operations (loop counters, index computation...) and memory
      * accesses are not included.
-     * A naive implementation is considered (more operations might be required 
+     * A naive implementation is considered (more operations might be required
      * for numerical stability in an actual implementation).
-     * 
-     * @return size_t Number of MAC operations.
+     *
+     * @return std::size_t Number of MAC operations.
      */
-    virtual size_t getNbMACOps() const { return 0; };
-    virtual ~OperatorStats() = default;
+    virtual std::size_t getNbMACOps() const { return 0; };
 
 protected:
     const Operator &mOp;
@@ -153,16 +158,20 @@ protected:
 
 /**
  * @brief Base class to compute statistics from a GraphView
- * 
+ *
  */
 class StaticAnalysis : public std::enable_shared_from_this<StaticAnalysis> {
 public:
+    StaticAnalysis() = delete;
     StaticAnalysis(std::shared_ptr<GraphView> graph);
-    const std::shared_ptr<GraphView> getGraph() const noexcept { return mGraph; }
+
+    virtual ~StaticAnalysis();
+
+    inline const std::shared_ptr<GraphView> getGraph() const noexcept { return mGraph; }
 
     /**
      * @brief Get the Operator Stats object corresponding to the given node.
-     * 
+     *
      * @param node Node
      * @return std::shared_ptr<OperatorStats> Node's Operator stats
      */
@@ -172,65 +181,67 @@ public:
      * @brief Get the number of parameters associated to a node. This includes
      * all Producers directly connected to the node's inputs as well as all
      * internal Producers (in case of a meta operator).
-     * 
+     *
      * Note: this function does not check if parameters are shared between
      * several nodes or not. This means that simply adding parameters count from
      * several nodes may lead to a higher number of parameters than in reality
      * if some of them are shared.
-     * 
+     *
      * @param node Node
-     * @return size_t Number of parameters
+     * @return std::size_t Number of parameters
      */
-    virtual size_t getNbParams(std::shared_ptr<Node> node) const;
+    virtual std::size_t getNbParams(std::shared_ptr<Node> node) const;
 
     /**
      * @brief Get the total parameters memory size, in bits, associated to a node.
-     * This includes all Producers directly connected to the node's inputs as 
+     * This includes all Producers directly connected to the node's inputs as
      * well as all internal Producers (in case of a meta operator).
-     * 
+     *
      * Note: this function does not check if parameters are shared between
      * several nodes or not. This means that simply adding parameters size from
      * several nodes may lead to a higher parameter size than in reality
      * if some of them are shared.
-     * 
+     *
      * @param node Node
-     * @return size_t Total parameters memory, in bits
+     * @return std::size_t Total parameters memory, in bits
      */
-    virtual size_t getParamsSize(std::shared_ptr<Node> node) const;
-
-    size_t getNbArithmOps() const { return accumulate(&OperatorStats::getNbArithmOps); }
-    size_t getNbLogicOps() const { return accumulate(&OperatorStats::getNbLogicOps); }
-    size_t getNbCompOps() const { return accumulate(&OperatorStats::getNbCompOps); }
-    size_t getNbNLOps() const { return accumulate(&OperatorStats::getNbNLOps); }
-    size_t getNbOps() const { return accumulate(&OperatorStats::getNbOps); }
-    size_t getNbArithmIntOps() const { return accumulate(&OperatorStats::getNbArithmIntOps); }
-    size_t getNbArithmFpOps() const { return accumulate(&OperatorStats::getNbArithmFpOps); }
-    size_t getNbMACOps() const { return accumulate(&OperatorStats::getNbMACOps); }
+    virtual std::size_t getParamsSize(std::shared_ptr<Node> node) const;
+
+    std::size_t getNbArithmOps() const;
+    std::size_t getNbLogicOps() const;
+    std::size_t getNbCompOps() const;
+    std::size_t getNbNLOps() const;
+    std::size_t getNbOps() const;
+    std::size_t getNbArithmIntOps() const;
+    std::size_t getNbArithmFpOps() const;
+    std::size_t getNbMACOps() const;
     virtual void summary(bool incProducers = false) const;
-    virtual ~StaticAnalysis() = default;
 
 protected:
     const std::shared_ptr<GraphView> mGraph;
 
-    size_t accumulate(size_t (OperatorStats::*func)() const) const;
+    std::size_t accumulate(std::size_t (OperatorStats::*func)() const) const;
 };
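The accumulate() helper above takes a pointer to a const member function, so every statistic getter can reuse a single traversal loop over the graph's OperatorStats. A reduced sketch of the mechanism (types are illustrative, not the Aidge implementation):

// Sketch of pointer-to-member accumulation; types are illustrative.
#include <cstddef>
#include <memory>
#include <vector>

struct Stats {
    virtual ~Stats() = default;
    virtual std::size_t getNbOps() const { return 1; }
};

std::size_t accumulate(const std::vector<std::shared_ptr<Stats>>& all,
                       std::size_t (Stats::*func)() const) {
    std::size_t total = 0;
    for (const auto& s : all) {
        total += (s.get()->*func)();  // invoke the member through the pointer
    }
    return total;
}
// usage: accumulate(allStats, &Stats::getNbOps);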
 
 ////////////////////////////////////////////////////////////////////////////////
 
 class MetaOpStats : public OperatorStats {
 public:
+    MetaOpStats() = delete;
     MetaOpStats(const Operator& op) : OperatorStats(op) {}
 
+    ~MetaOpStats();
+
     static std::unique_ptr<MetaOpStats> create(const Operator& op) {
         return std::make_unique<MetaOpStats>(op);
     }
 
-    size_t getNbArithmOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmOps(); }
-    size_t getNbLogicOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbLogicOps(); }
-    size_t getNbCompOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbCompOps(); }
-    size_t getNbNLOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbNLOps(); }
-    size_t getNbArithmIntOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmIntOps(); }
-    size_t getNbMACOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbMACOps(); }
+    std::size_t getNbArithmOps() const override;
+    std::size_t getNbLogicOps() const override;
+    std::size_t getNbCompOps() const override;
+    std::size_t getNbNLOps() const override;
+    std::size_t getNbArithmIntOps() const override;
+    std::size_t getNbMACOps() const override;
 };
 
 template <class OP>
@@ -242,7 +253,7 @@ public:
         return std::make_unique<ConvStats<OP>>(op);
     }
 
-    size_t getNbMACOps() const override {
+    std::size_t getNbMACOps() const override {
         const OP& op_ = dynamic_cast<const OP&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
 	    const std::size_t weightsSize = op_.getInput(1)->size();
@@ -250,7 +261,7 @@ public:
             = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
                               op_.getOutput(0)->dims().cend(),
                               1,
-                              std::multiplies<size_t>()); // NCHW...
+                              std::multiplies<std::size_t>()); // NCHW...
         const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
         return batchSize * (weightsSize * outputSize);
     }
@@ -271,19 +282,19 @@ public:
         return std::make_unique<MaxPoolingStats<OP>>(op);
     }
 
-    size_t getNbCompOps() const override {
+    std::size_t getNbCompOps() const override {
         const OP& op_ = dynamic_cast<const OP&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
 	    const std::size_t poolSize
             = std::accumulate(op_.kernelDims().cbegin(),
                               op_.kernelDims().cend(),
                               1,
-                              std::multiplies<size_t>());
+                              std::multiplies<std::size_t>());
         const std::size_t outputSize
             = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
                               op_.getOutput(0)->dims().cend(),
                               1,
-                              std::multiplies<size_t>()); // NCHW...
+                              std::multiplies<std::size_t>()); // NCHW...
         const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
         return batchSize * ((poolSize - 1) * outputSize);
     }
@@ -302,19 +313,19 @@ public:
         return std::make_unique<AvgPoolingStats<OP>>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const OP& op_ = dynamic_cast<const OP&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
 	    const std::size_t poolSize
             = std::accumulate(op_.kernelDims().cbegin(),
                               op_.kernelDims().cend(),
                               1,
-                              std::multiplies<size_t>());
+                              std::multiplies<std::size_t>());
         const std::size_t outputSize
             = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
                               op_.getOutput(0)->dims().cend(),
                               1,
-                              std::multiplies<size_t>()); // NCHW...
+                              std::multiplies<std::size_t>()); // NCHW...
         const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
         // (poolSize - 1) additions + 1 division for each output
         return batchSize * (poolSize * outputSize);
@@ -334,7 +345,7 @@ public:
         return std::make_unique<FCStats>(op);
     }
 
-    size_t getNbMACOps() const override {
+    std::size_t getNbMACOps() const override {
         const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
 	    const std::size_t weightsSize = op_.getInput(1)->size();
@@ -353,20 +364,20 @@ public:
         return std::make_unique<MatMulStats>(op);
     }
 
-    size_t getNbMACOps() const override {
+    std::size_t getNbMACOps() const override {
         const MatMul_Op& op_ = dynamic_cast<const MatMul_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t n = (op_.getInput(0)->dims().size() > 1)
+        const std::size_t n = (op_.getInput(0)->dims().size() > 1)
             ? op_.getInput(0)->dims().end()[-2] : 1;
-        const size_t k = op_.getInput(0)->dims().back();
-        const size_t m = (op_.getInput(1)->dims().size() > 1)
+        const std::size_t k = op_.getInput(0)->dims().back();
+        const std::size_t m = (op_.getInput(1)->dims().size() > 1)
             ? op_.getInput(1)->dims().back() : 1;
-        const size_t nb = (op_.getInput(0)->dims().size() > 2)
+        const std::size_t nb = (op_.getInput(0)->dims().size() > 2)
             ? std::accumulate(op_.getInput(0)->dims().cbegin(),
                               op_.getInput(0)->dims().cend() - 2,
                               1,
-                              std::multiplies<size_t>())
-            : 1; 
+                              std::multiplies<std::size_t>())
+            : 1;
 
         return nb * n * m * k;
     }
@@ -382,7 +393,7 @@ public:
         return std::make_unique<ReLUStats>(op);
     }
 
-    size_t getNbCompOps() const override {
+    std::size_t getNbCompOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
@@ -399,14 +410,14 @@ public:
         return std::make_unique<AbsStats>(op);
     }
 
-    size_t getNbCompOps() const override {
+    std::size_t getNbCompOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
     }
 
     // This is in the worst case (all values are negative)
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
@@ -423,12 +434,12 @@ public:
         return std::make_unique<ReduceMeanStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t nbIn = op_.getInput(0)->size();
-        const size_t nbOut = op_.getOutput(0)->size();
-        const size_t nbReduce = nbIn / nbOut;
+        const std::size_t nbIn = op_.getInput(0)->size();
+        const std::size_t nbOut = op_.getOutput(0)->size();
+        const std::size_t nbReduce = nbIn / nbOut;
         // (nbReduce - 1) additions + 1 division for each output
         return nbOut * nbReduce;
     }
@@ -444,12 +455,12 @@ public:
         return std::make_unique<ReduceSumStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const ReduceSum_Op& op_ = dynamic_cast<const ReduceSum_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t nbIn = op_.getInput(0)->size();
-        const size_t nbOut = op_.getOutput(0)->size();
-        const size_t nbReduce = nbIn / nbOut;
+        const std::size_t nbIn = op_.getInput(0)->size();
+        const std::size_t nbOut = op_.getOutput(0)->size();
+        const std::size_t nbReduce = nbIn / nbOut;
         // (nbReduce - 1) additions for each output
         return nbOut * (nbReduce - 1);
     }
@@ -465,22 +476,22 @@ public:
         return std::make_unique<SoftmaxStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
-        const size_t nbReduce = op_.getInput(0)->dims()[axis];
-        const size_t nbOut = op_.getOutput(0)->size();
+        const std::size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+        const std::size_t nbReduce = op_.getInput(0)->dims()[axis];
+        const std::size_t nbOut = op_.getOutput(0)->size();
         // nbOut divisions + (nbReduce - 1) additions
         return nbOut + (nbReduce - 1);
     }
 
-    size_t getNbNLOps() const override {
+    std::size_t getNbNLOps() const override {
         const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
-        const size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
-        const size_t nbReduce = op_.getInput(0)->dims()[axis];
-        const size_t nbOut = op_.getOutput(0)->size();
+        const std::size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+        const std::size_t nbReduce = op_.getInput(0)->dims()[axis];
+        const std::size_t nbOut = op_.getOutput(0)->size();
         // nbOut exp + nbReduce exp
         return nbOut + nbReduce;
     }
@@ -515,7 +526,7 @@ public:
         return std::make_unique<ElemWiseOpStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
@@ -535,7 +546,7 @@ public:
         return std::make_unique<ElemWiseLogicOpStats>(op);
     }
 
-    size_t getNbArithmOps() const override {
+    std::size_t getNbArithmOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
@@ -552,7 +563,7 @@ public:
         return std::make_unique<ElemWiseNLOpStats>(op);
     }
 
-    size_t getNbNLOps() const override {
+    std::size_t getNbNLOps() const override {
         const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
         AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
         return op_.getOutput(0)->size();
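To make ConvStats::getNbMACOps above concrete, here is the same batchSize * weightsSize * outputSize computation on made-up dimensions (all values are illustrative only):

// Worked example of the Conv MAC formula; the dimensions are made up.
#include <cstddef>
#include <functional>  // std::multiplies
#include <numeric>     // std::accumulate
#include <vector>

int main() {
    const std::size_t weightsSize = 64 * 3 * 3 * 3;          // 64 out ch, 3 in ch, 3x3 kernel
    const std::vector<std::size_t> outDims{8, 64, 56, 56};   // NCHW output dims
    const std::size_t outputSize = std::accumulate(
        outDims.cbegin() + 2, outDims.cend(),
        std::size_t(1), std::multiplies<std::size_t>());     // 56 * 56 = 3136
    const std::size_t batchSize = outDims[0];
    // One MAC per weight and per output position, for each batch element:
    const std::size_t macs = batchSize * weightsSize * outputSize;  // 43352064
    return macs == 43352064 ? 0 : 1;
}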
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 13f63ce98c526f0c57a363ada4e7f50ccdbfb83b..a2d344cba3dd7af3b6f0b2b4078852afcd0ae8cf 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -52,12 +52,12 @@ public:
     /**
      * @brief constructor for ArgMax op
      * @param[in] axis around which perform the operation
-     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axis and 
+     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axis and
      * if false we remove the dimension completely
-     * @param[in] select_last_index in case we have many maximum, if true the last index is returned 
-     * if false the first index is returned. 
+     * @param[in] select_last_index in case we have many maximum, if true the last index is returned
+     * if false the first index is returned.
      */
-    ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index)
+    ArgMax_Op(std::int32_t axis = 0, bool keep_dims = true, bool select_last_index = false)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ArgMaxAttr::Axis>(axis),
@@ -69,24 +69,13 @@ public:
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ArgMax_Op(const ArgMax_Op& op)
-        : OperatorTensor(op),
-          mAttributes(op.mAttributes)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    ArgMax_Op(const ArgMax_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ArgMax_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ArgMax_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -114,17 +103,14 @@ public:
  * @param axis Dimension over which data max should be computed.
  * @param keep_dims Whether or not reduced dimensions are to be erased.
  * @param select_last_index Whether to select the last index of max elements in case there are many maximums.
- * By default the first max element index is 
+ * By default the first max element index is returned.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
  */
-inline std::shared_ptr<Node> ArgMax(std::int32_t axis=0,
-                                    bool keep_dims=true,
-                                    bool select_last_index=false,
-                                    const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name);
-
-}
+std::shared_ptr<Node> ArgMax(std::int32_t axis = 0,
+                             bool keep_dims = true,
+                             bool select_last_index = false,
+                             const std::string& name = "");
 
 }  // namespace Aidge
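A short usage sketch for the factory declared above (the axis value and node name are illustrative):

#include <memory>
#include "aidge/operator/ArgMax.hpp"

std::shared_ptr<Aidge::Node> makeArgMax() {
    // Illustrative: reduce over axis 1, keeping the reduced dimension.
    return Aidge::ArgMax(/*axis=*/1, /*keep_dims=*/true,
                         /*select_last_index=*/false, "argmax0");
}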
 
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 95698b751a9f0f4c0cc8e716eb5140ee74e21a3f..5f148e126b889c422392923fa33ea1cffbbd654e 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -12,11 +12,11 @@
 #ifndef AIDGE_CORE_OPERATOR_OPERATOR_H_
 #define AIDGE_CORE_OPERATOR_OPERATOR_H_
 
+#include <cstddef>
 #include <memory>
 #include <string>
 #include <vector>
 #include <utility>
-#include <cstddef>
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -49,11 +49,19 @@ enum class InputCategory {
 
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
-    std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
-    std::shared_ptr<DynamicAttributes> mInheritedAttrs;
+    /** Implementation of the operator. */
+    std::shared_ptr<OperatorImpl> mImpl;
+    /** Attributes of the associated Node.
+     *
+     * Defaults to an empty vector on copy construction, because two Operators
+     * cannot be associated with the same Node.
+     */
+    std::vector<std::shared_ptr<DynamicAttributes>> mInheritedAttrs{};
 
 private:
+    /** Type of Operator. */
     std::string mType;
+    /** Type of data the Operator should handle. */
     const OperatorType mOperatorType;
     const std::vector<InputCategory> mInputsCategory;
     const IOIndex_t mNbOut;
@@ -82,18 +90,34 @@ public:
         // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
         // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
     }
-    std::shared_ptr<Operator> operator()(std::shared_ptr<DynamicAttributes> attrs) {
-        mInheritedAttrs = attrs;
-        return shared_from_this();
-    }
+    // std::shared_ptr<Operator> operator()(std::shared_ptr<DynamicAttributes> attrs) {
+    //     mInheritedAttrs = attrs;
+    //     return shared_from_this();
+    // }
 
     virtual ~Operator() noexcept;
 
 public:
+    void setInheritedAttrs(std::shared_ptr<DynamicAttributes>& attr) {
+        mInheritedAttrs.push_back(attr);
+    }
     virtual std::shared_ptr<Operator> clone() const = 0;
 
     virtual std::shared_ptr<Attributes> attributes() const { return nullptr; };
-    virtual std::shared_ptr<DynamicAttributes> inheritedAttributes() const { return mInheritedAttrs; };
+
+    /**
+     * @brief Get the currently associated Node's attributes.
+     * @return Shared pointer to the Attributes of the associated Node.
+     *
+     * If no Node has been associated with the Operator, returns `nullptr`.
+     * @note As Operators have only been tested with a single associated Node,
+     * only attributes of the first associated Node are returned. This should be
+     * updated.
+     */
+    virtual std::shared_ptr<DynamicAttributes> inheritedAttributes() const {
+        return mInheritedAttrs.empty() ? nullptr : mInheritedAttrs[0];
+    }
+
     /**
      * @brief Set the specified input with a shallow copy.
      * @param inputIdx Index of the input to set.
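Reduced sketch of the new ownership model above: each Node pushes its attribute dictionary into the Operator it wraps (see the setInheritedAttrs() call added in Node.cpp below), and inheritedAttributes() hands back the first one, or nullptr for a free-standing Operator. The types below are illustrative stand-ins, not Aidge code:

// Illustrative stand-ins for Operator/DynamicAttributes.
#include <memory>
#include <vector>

struct Attrs {};

class Op {
public:
    void setInheritedAttrs(std::shared_ptr<Attrs>& attrs) { mInherited.push_back(attrs); }
    std::shared_ptr<Attrs> inheritedAttributes() const {
        return mInherited.empty() ? nullptr : mInherited[0];
    }
private:
    std::vector<std::shared_ptr<Attrs>> mInherited;
};

int main() {
    Op op;
    auto attrs = std::make_shared<Attrs>();
    op.setInheritedAttrs(attrs);               // done by the Node constructor in Aidge
    return op.inheritedAttributes() ? 0 : 1;   // first associated Node's attributes
}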
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 0fc350f1a10227e417f3b09baf2c7bebeb84d875..6ac76c138e2a835f8e74c5ede26e449c537d61d2 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -365,7 +365,7 @@ public:
     static inline typename std::enable_if<!has_less_than_operator<T>::value, void>::type makeTypeConditionallyAvailable() {}
 
     template<typename T>
-    static inline typename std::enable_if<has_less_than_operator<T>::value, void>::type makeTypeConditionallyAvailable() {
+    static typename std::enable_if<has_less_than_operator<T>::value, void>::type makeTypeConditionallyAvailable() {
         mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
     }
 
@@ -388,7 +388,7 @@ struct DynamicAttributes::AnyUtils<py::object> : public DynamicAttributes::AnyUt
 
     size_t hash(const future_std::any& attr) const override final {
         // Here we are mixing Python and C++ hashes... if both are
-        // well implemented, this should not increase the collision 
+        // well implemented, this should not increase the collision
         // probability for the same number of stored hashes.
         return py::hash(future_std::any_cast<py::object>(attr));
     }
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index d6851f1e42233f9d8af88d10da9046f73f94b8c4..bc99ab7c0362f5bfa4c2f1bbc01f3089d28d699f 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -19,7 +19,6 @@
 #include <fmt/ranges.h>
 
 #include "aidge/data/half_fmt.hpp"
-
 #include "aidge/utils/Attributes.hpp"
 
 namespace Aidge {
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index ee19796098bf1d755448d833aa6a8a2c24180baa..c47f3c33efd8348f8bac4f0ab2221e39e3d5e62a 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -28,6 +28,12 @@
 
 namespace Aidge {
 
+Tensor::Tensor(const Tensor& other) = default;
+Tensor::Tensor(Tensor&& other) = default;
+
+Tensor& Tensor::operator=(const Tensor& other) = default;
+Tensor& Tensor::operator=(Tensor&& other) = default;
+
 Tensor::~Tensor() noexcept = default;
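These definitions pair with the declarations changed in Tensor.hpp: declaring the special members in the header and defaulting them in the .cpp keeps their bodies out of every translation unit and lets member types stay incomplete in the header. Generic shape of the pattern, with an illustrative class standing in for Tensor:

// Illustrative pattern; Widget stands in for Tensor.
// widget.hpp
class Widget {
public:
    Widget(const Widget& other);             // declared, not defined inline
    Widget& operator=(const Widget& other);
    ~Widget();
};

// widget.cpp
Widget::Widget(const Widget& other) = default;
Widget& Widget::operator=(const Widget& other) = default;
Widget::~Widget() = default;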
 
 
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 465359757eadd2799aa7f272e2d85b032a60cfdd..4c6f6ada8fdf069c308398f7b978e1d44fde8f65 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -940,6 +940,22 @@ void Aidge::GraphView::addChild(
   add(toOtherView);
 }
 
+void Aidge::GraphView::updateNodeName(const std::shared_ptr<Node>& node, const std::string& newName) {
+    if (!newName.empty()) {
+        auto itNew = mNodeRegistry.insert(std::make_pair(newName, node));
+        if (!itNew.second) {
+            Log::notice("Replacing existing node name in graph node name registry: {}", newName);
+            (itNew.first)->second = node;
+        }
+    }
+
+    if (!node->name().empty()) {
+        const auto it = mNodeRegistry.find(node->name());
+        AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted!", node->name(), name());
+        mNodeRegistry.erase(it);
+    }
+}
+
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents() const {
   // TODO: choose if we return a set or a vector
   std::set<std::shared_ptr<Node>> parents;
diff --git a/src/graph/Matching.cpp b/src/graph/Matching.cpp
index 4a62019a7aa044ebcf2089d91f3ba097d85218e7..ddf9bcbf946b6fe8e86c9a14679b951ebd88323f 100644
--- a/src/graph/Matching.cpp
+++ b/src/graph/Matching.cpp
@@ -1,11 +1,43 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
 #include "aidge/graph/Matching.hpp"
 
+#include <algorithm>   // std::find_if
+#include <cctype>      // std::isspace
+#include <cstddef>     // std::size_t
+#include <memory>
+#include <set>
+#include <string>      // std::stoi
+#include <utility>     // std::pair
+#include <vector>
+
 #include <fmt/color.h>
 
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+
+static void removeLeadingWhitespace(std::string& str) {
+    str.erase(str.begin(),
+        std::find_if(str.cbegin(),
+                    str.cend(),
+                    [](char c) { return !std::isspace(c); }));
+}
+
+////////////////////////////////////////////////////////////
+
 Aidge::SinglePassGraphMatching::Context::Context() = default;
 Aidge::SinglePassGraphMatching::Context::Context(const Context& other) = default;
 Aidge::SinglePassGraphMatching::Context& Aidge::SinglePassGraphMatching::Context::operator=(const Context& other) = default;
-Aidge::SinglePassGraphMatching::Context::~Context() = default;
+Aidge::SinglePassGraphMatching::Context::~Context() noexcept = default;
 
 ////////////////////////////////////////////////////////////
 
@@ -35,7 +67,7 @@ std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphM
     std::set<MatchingResult> matches;
 
     while (matchSequence(ctx, matches) || matchNodeOrBlock(ctx, matches)) {
-        removeWhiteSpace(ctx.query);
+        removeLeadingWhitespace(ctx.query);
         if (!ctx.query.empty() && ctx.query[0] == ';') {
             ctx.query.erase(0, 1);
         }
@@ -44,7 +76,7 @@ std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphM
         }
     }
 
-    removeWhiteSpace(ctx.query);
+    removeLeadingWhitespace(ctx.query);
     if (!ctx.query.empty()) {
         Log::warn("Syntax error, unable to parse remaining query: {}", ctx.query);
     }
@@ -56,14 +88,14 @@ std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphM
     return matches;
 }
 
-Aidge::SinglePassGraphMatching::MatchingResult Aidge::SinglePassGraphMatching::matchFrom(NodePtr startNode, const std::string& query) {
+Aidge::SinglePassGraphMatching::MatchingResult Aidge::SinglePassGraphMatching::matchFrom(std::shared_ptr<Node> startNode, const std::string& query) {
     Context ctx;
     ctx.query = query;
     ctx.startNode = startNode;
     std::set<MatchingResult> matches;
 
     while (matchSequence(ctx, matches) || matchNodeOrBlock(ctx, matches)) {
-        removeWhiteSpace(ctx.query);
+        removeLeadingWhitespace(ctx.query);
         if (!ctx.query.empty() && ctx.query[0] == ';') {
             ctx.query.erase(0, 1);
         }
@@ -72,7 +104,7 @@ Aidge::SinglePassGraphMatching::MatchingResult Aidge::SinglePassGraphMatching::m
         }
     }
 
-    removeWhiteSpace(ctx.query);
+    removeLeadingWhitespace(ctx.query);
     if (!ctx.query.empty()) {
         Log::warn("Syntax error, unable to parse remaining query: {}", ctx.query);
     }
@@ -123,8 +155,8 @@ bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<Mat
 
     // QUANTIFIER?
     bool matchMore = false;
-    size_t matchQuantity = 0;
-    removeWhiteSpace(newCtx.query);
+    std::size_t matchQuantity = 0;
+    removeLeadingWhitespace(newCtx.query);
     if (!newCtx.query.empty() && (newCtx.query[0] == '?' || newCtx.query[0] == '*')) {
         AIDGE_ASSERT(!(ctx.firstSequence && ctx.firstNode),
             "Ill-formed query; the root node cannot be optional in query at: {}", ctx.query);
@@ -155,7 +187,7 @@ bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<Mat
     else if (!newCtx.query.empty() && newCtx.query[0] == '{') {
         newCtx.query.erase(0, 1);
 
-        removeWhiteSpace(newCtx.query);
+        removeLeadingWhitespace(newCtx.query);
         const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(),
             [](char c) { return !isdigit(c); });
         if (endQuantity != newCtx.query.begin()) {
@@ -172,7 +204,7 @@ bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<Mat
             return false;
         }
 
-        removeWhiteSpace(newCtx.query);
+        removeLeadingWhitespace(newCtx.query);
         if (!newCtx.query.empty() && newCtx.query[0] == '}') {
             newCtx.query.erase(0, 1);
         }
@@ -231,7 +263,7 @@ bool Aidge::SinglePassGraphMatching::matchBlock(Context& ctx, std::set<MatchingR
     ++newCtx.depth;
 
     // '('
-    removeWhiteSpace(newCtx.query);
+    removeLeadingWhitespace(newCtx.query);
     if (!newCtx.query.empty() && newCtx.query[0] == '(') {
         newCtx.query.erase(0, 1);
     }
@@ -252,7 +284,7 @@ bool Aidge::SinglePassGraphMatching::matchBlock(Context& ctx, std::set<MatchingR
     }
 
     // ')'
-    removeWhiteSpace(newCtx.query);
+    removeLeadingWhitespace(newCtx.query);
     if (!newCtx.query.empty() && newCtx.query[0] == ')') {
         newCtx.query.erase(0, 1);
     }
@@ -337,7 +369,7 @@ bool Aidge::SinglePassGraphMatching::matchParallel(Context& ctx, std::set<Matchi
     while (true) {
         // ('&' NODE_OR_BLOCK)+
         //   '&'
-        removeWhiteSpace(newCtx.query);
+        removeLeadingWhitespace(newCtx.query);
         if (!newCtx.query.empty() && newCtx.query[0] == '&') {
             newCtx.query.erase(0, 1);
             found = true;
@@ -402,7 +434,7 @@ bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<Mat
     while (true) {
         // ('|' NODE_OR_BLOCK)+
         //    '|'
-        removeWhiteSpace(newCtx.query);
+        removeLeadingWhitespace(newCtx.query);
         if (!newCtx.query.empty() && newCtx.query[0] == '|') {
             newCtx.query.erase(0, 1);
             found = true;
@@ -446,7 +478,7 @@ bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingRe
     Log::debug("{}edge", std::string(2*newCtx.depth, ' '));
 
     // ('-' | '~') or '<'
-    removeWhiteSpace(newCtx.query);
+    removeLeadingWhitespace(newCtx.query);
     if (!newCtx.query.empty() && (newCtx.query[0] == '-' || newCtx.query[0] == '~')) {
         newCtx.singleOutput = (newCtx.query[0] == '-');
         newCtx.query.erase(0, 1); // drop '-'
@@ -550,7 +582,7 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     auto newMatches = matches;
 
     // (TYPE | '.' | '$')
-    removeWhiteSpace(newCtx.query);
+    removeLeadingWhitespace(newCtx.query);
     if (newCtx.query.empty()) {
         Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
         return false;
@@ -833,3 +865,8 @@ bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingRe
     matches = newMatches;
     return true;
 }
+
+bool Aidge::operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, const Aidge::SinglePassGraphMatching::MatchingResult& rhs) {
+    // Each match's rootNode is guaranteed to be different!
+    return lhs.graph->rootNode() < rhs.graph->rootNode();
+}
\ No newline at end of file
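The out-of-line operator< above is what lets matches live in std::set<MatchingResult>: ordering by root-node pointer is a valid strict weak order because each match has a distinct root. A reduced sketch with an illustrative stand-in type:

// Reduced sketch; Result stands in for MatchingResult.
#include <memory>
#include <set>

struct Result {
    std::shared_ptr<int> root;  // stands in for graph->rootNode()
};

bool operator<(const Result& lhs, const Result& rhs) {
    return lhs.root < rhs.root;  // shared_ptr::operator< compares stored pointers
}

int main() {
    std::set<Result> results;
    results.insert(Result{std::make_shared<int>(0)});
    results.insert(Result{std::make_shared<int>(0)});  // distinct roots: both kept
    return results.size() == 2 ? 0 : 1;
}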
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 92ae463085a3583dfe894a1b9f6119fa0b099287..384e946c674641f7d498d8c5745dcc745f34d751 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -35,6 +35,7 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttribute
     mForward.push_back([this](){ this->mOperator->forward(); return true; });
     // mForward.push_back(std::bind(&Operator::forward, mOperator.get()));
     mBackward.push_back([this](){ this->mOperator->backward(); return true; });
+    op->setInheritedAttrs(attrs);
 }
 
 // Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs)
@@ -225,6 +226,16 @@ Aidge::IOIndex_t Aidge::Node::nbValidOutputs() const {
     return counter;
 }
 
+std::set<std::shared_ptr<Aidge::GraphView>> Aidge::Node::views() const noexcept {
+    std::set<std::shared_ptr<GraphView>> res;
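+    // Lock each weak reference; views that have already expired are skipped.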
+    for (const auto& v : mViews) {
+        if (auto p = v.lock()) {
+            res.insert(p);
+        }
+    }
+    return res;
+}
+
 void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId) {
     AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
         "Input index ({}) is out of bound ({}) for node {} (of type {})",
diff --git a/src/graph/StaticAnalysis.cpp b/src/graph/StaticAnalysis.cpp
index 033e51022842983caacba9385248c9f02c1e5568..4309c5c37b72dea9f07f8e5a2e7ce7678090b2e2 100644
--- a/src/graph/StaticAnalysis.cpp
+++ b/src/graph/StaticAnalysis.cpp
@@ -11,13 +11,31 @@
 
 #include "aidge/graph/StaticAnalysis.hpp"
 
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <numeric>  // std::accumulate
+#include <set>
+
+#include <fmt/core.h>  // fmt::println
+#include <fmt/format.h>
+#include <fmt/ranges.h>
+
+#include "aidge/data/Data.hpp"  // Aidge::isDataTypeFloatingPoint
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
 Aidge::OperatorStats::OperatorStats(const Operator& op)
   : mOp(op)
 {
     //ctor
 }
 
-size_t Aidge::OperatorStats::getNbArithmIntOps() const {
+Aidge::OperatorStats::~OperatorStats() = default;
+
+std::size_t Aidge::OperatorStats::getNbArithmIntOps() const {
     const auto opTensor = dynamic_cast<const OperatorTensor*>(&mOp);
     if (opTensor) {
         if (!isDataTypeFloatingPoint(opTensor->getOutput(0)->dataType())) {
@@ -27,23 +45,27 @@ size_t Aidge::OperatorStats::getNbArithmIntOps() const {
     return 0;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+
 Aidge::StaticAnalysis::StaticAnalysis(std::shared_ptr<GraphView> graph)
   : mGraph(graph)
 {
     //ctor
 }
 
+Aidge::StaticAnalysis::~StaticAnalysis() = default;
+
 void Aidge::StaticAnalysis::summary(bool incProducers) const {
     fmt::println("--------------------------------------------------------------------------------");
     fmt::println("                        Layer (type)               Output Shape         Param #");
     fmt::println("================================================================================");
 
-    size_t nbParams = 0;
-    size_t paramsSize = 0;  // Size in bits
-    size_t fwdBwdSize = 0;  // Size in bits
+    std::size_t nbParams = 0;
+    std::size_t paramsSize = 0;  // Size in bits
+    std::size_t fwdBwdSize = 0;  // Size in bits
 
     const auto namePtrTable = mGraph->getRankedNodesName("{0} ({1}#{3})");
-    for (const auto node : mGraph->getOrderedNodes()) {
+    for (const auto& node : mGraph->getOrderedNodes()) {
         if (node->type() == Producer_Op::Type && !incProducers) {
             continue;
         }
@@ -53,8 +75,8 @@ void Aidge::StaticAnalysis::summary(bool incProducers) const {
         if (opTensor) {
             const auto outputDims = opTensor->getOutput(0)->dims();
             outputDimsStr = fmt::format("{: >27}", fmt::format("{}", outputDims));
-  
-            for (size_t out = 0; out < node->nbOutputs(); ++out) {
+
+            for (std::size_t out = 0; out < node->nbOutputs(); ++out) {
                 const auto output = opTensor->getOutput(out);
                 if (output && node->type() != Producer_Op::Type) {
                     fwdBwdSize += output->size()
@@ -69,8 +91,8 @@ void Aidge::StaticAnalysis::summary(bool incProducers) const {
           namePtrTable.at(node), outputDimsStr, getNbParams(node));
     }
 
-    size_t inputSize = 0;  // Size in bits
-    for (const auto input : mGraph->getOrderedInputs()) {
+    std::size_t inputSize = 0;  // Size in bits
+    for (const auto& input : mGraph->getOrderedInputs()) {
         if (input.first) {
             auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator());
             if (opTensor && opTensor->getInput(input.second)) {
@@ -90,13 +112,13 @@ void Aidge::StaticAnalysis::summary(bool incProducers) const {
     fmt::println("--------------------------------------------------------------------------------");
 }
 
-size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
+std::size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
     const auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
 
-    size_t nbParams = 0;
+    std::size_t nbParams = 0;
 
     // Look for Producers directly attached to the node's inputs.
-    size_t i = 0;
+    std::size_t i = 0;
-    for (auto parent : node->inputs()) {
+    for (const auto& parent : node->inputs()) {
         if (parent.first && mGraph->inView(parent.first)) {
             if (parent.first->type() == Producer_Op::Type && opTensor->getInput(i)) {
@@ -109,7 +131,7 @@ size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
     // Look for internal Producers, in case of meta-op.
     if (!node->getOperator()->isAtomic()) {
         const auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator())->getMicroGraph();
-        for (const auto internalNode : microGraph->getNodes()) {
+        for (const auto& internalNode : microGraph->getNodes()) {
             if (internalNode->type() == Producer_Op::Type) {
                 const auto internalOpTensor = std::dynamic_pointer_cast<OperatorTensor>(internalNode->getOperator());
                 nbParams += internalOpTensor->getOutput(0)->size();
@@ -120,14 +142,14 @@ size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
     return nbParams;
 }
 
-size_t Aidge::StaticAnalysis::getParamsSize(std::shared_ptr<Node> node) const {
+std::size_t Aidge::StaticAnalysis::getParamsSize(std::shared_ptr<Node> node) const {
     const auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
 
-    size_t paramsSize = 0;
+    std::size_t paramsSize = 0;
 
     // Look for Producers directly attached to the node's inputs.
-    size_t i = 0;
-    for (auto parent : node->inputs()) {
+    std::size_t i = 0;
+    for (const auto& parent : node->inputs()) {
         if (parent.first && mGraph->inView(parent.first)) {
             if (parent.first->type() == Producer_Op::Type && opTensor->getInput(i)) {
                 paramsSize += opTensor->getInput(i)->size()
@@ -140,7 +162,7 @@ size_t Aidge::StaticAnalysis::getParamsSize(std::shared_ptr<Node> node) const {
     // Look for internal Producers, in case of meta-op.
     if (!node->getOperator()->isAtomic()) {
         const auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator())->getMicroGraph();
-        for (const auto internalNode : microGraph->getNodes()) {
+        for (const auto& internalNode : microGraph->getNodes()) {
             if (internalNode->type() == Producer_Op::Type) {
                 const auto internalOpTensor = std::dynamic_pointer_cast<OperatorTensor>(internalNode->getOperator());
                 paramsSize += internalOpTensor->getOutput(0)->size()
@@ -160,12 +182,34 @@ std::shared_ptr<Aidge::OperatorStats> Aidge::StaticAnalysis::getOpStats(std::sha
             : std::make_shared<MetaOpStats>(*(node->getOperator()));
 }
 
-size_t Aidge::StaticAnalysis::accumulate(size_t (OperatorStats::*func)() const) const {
+std::size_t Aidge::StaticAnalysis::getNbArithmOps() const { return accumulate(&OperatorStats::getNbArithmOps); }
+std::size_t Aidge::StaticAnalysis::getNbLogicOps() const { return accumulate(&OperatorStats::getNbLogicOps); }
+std::size_t Aidge::StaticAnalysis::getNbCompOps() const { return accumulate(&OperatorStats::getNbCompOps); }
+std::size_t Aidge::StaticAnalysis::getNbNLOps() const { return accumulate(&OperatorStats::getNbNLOps); }
+std::size_t Aidge::StaticAnalysis::getNbOps() const { return accumulate(&OperatorStats::getNbOps); }
+std::size_t Aidge::StaticAnalysis::getNbArithmIntOps() const { return accumulate(&OperatorStats::getNbArithmIntOps); }
+std::size_t Aidge::StaticAnalysis::getNbArithmFpOps() const { return accumulate(&OperatorStats::getNbArithmFpOps); }
+std::size_t Aidge::StaticAnalysis::getNbMACOps() const { return accumulate(&OperatorStats::getNbMACOps); }
+
+std::size_t Aidge::StaticAnalysis::accumulate(std::size_t (OperatorStats::*func)() const) const {
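+    // Sum the selected per-operator statistic over every node of the graph.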
     return std::accumulate(
         mGraph->getNodes().cbegin(),
         mGraph->getNodes().cend(),
         std::size_t(0),
-        [this, func](const size_t& lhs, const std::shared_ptr<Node>& rhs) {
+        [this, func](std::size_t lhs, const std::shared_ptr<Node>& rhs) {
             return lhs + (this->getOpStats(rhs).get()->*func)();
         });
 }
+
+////////////////////////////////////////////////////////////////////////////////
+
+Aidge::MetaOpStats::~MetaOpStats() = default;
+
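+// A meta-operator's statistics are computed by running a StaticAnalysis on its micro-graph.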
+std::size_t Aidge::MetaOpStats::getNbArithmOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmOps(); }
+std::size_t Aidge::MetaOpStats::getNbLogicOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbLogicOps(); }
+std::size_t Aidge::MetaOpStats::getNbCompOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbCompOps(); }
+std::size_t Aidge::MetaOpStats::getNbNLOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbNLOps(); }
+std::size_t Aidge::MetaOpStats::getNbArithmIntOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmIntOps(); }
+std::size_t Aidge::MetaOpStats::getNbMACOps() const { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbMACOps(); }
\ No newline at end of file
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
index 4808b730d2261ba0c1ea6d0d09871b1f322fc8fb..531c41596ed4ef553a4b7ec7d2642b778044cc66 100644
--- a/src/operator/ArgMax.cpp
+++ b/src/operator/ArgMax.cpp
@@ -14,7 +14,6 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int32_t
 #include <memory>
-#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
@@ -25,6 +24,22 @@
 
 const std::string Aidge::ArgMax_Op::Type = "ArgMax";
 
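+// Copy constructor: copies the attributes and re-binds a backend implementation if the source operator has one.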
+Aidge::ArgMax_Op::ArgMax_Op(const Aidge::ArgMax_Op& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ArgMax_Op::clone() const {
+    return std::make_shared<ArgMax_Op>(*this);
+}
+
 bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // make Axis attribute positive
@@ -55,3 +69,13 @@ void Aidge::ArgMax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
 std::set<std::string> Aidge::ArgMax_Op::getAvailableBackends() const {
     return Registrar<ArgMax_Op>::getKeys();
 }
+
+////////////////////////////////////////////////////////////////////////////////
+
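+// Factory helper: wraps a new ArgMax_Op in a Node, ready to be added to a GraphView.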
+std::shared_ptr<Aidge::Node> Aidge::ArgMax(std::int32_t axis,
+                                           bool keep_dims,
+                                           bool select_last_index,
+                                           const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name);
+}
\ No newline at end of file
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 50c8f561c1732d6f7f37ae5b8d6f03c4e135939c..55be9636f4596b0deeb81d0174b717a91ff76644 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -117,7 +117,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
 
             auto prod = addProducer(metaNode, inputIdx, {convNbOutChannels}, "b");
             // Add the new bias node to the same views as the meta node
-            for (auto g : metaNode->views()) {
+            for (auto& g : metaNode->views()) {
                 g->add(prod);
             }
         }
@@ -126,12 +126,12 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             if (convNode->input(1).first) {
                 // Add the new bias node to the same views as the weights node
                 // if possible
-                for (auto g : convNode->input(1).first->views()) {
+                for (auto& g : convNode->input(1).first->views()) {
                     g->add(prod);
                 }
             }
             else {
-                for (auto g : convNode->views()) {
+                for (auto& g : convNode->views()) {
                     g->add(prod);
                 }
             }