diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index 73b85c8a409e675c849b9ca66557c63b5acf6359..cd56a55fa7e9cbcefba4715188fd270462e81976 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -27,6 +27,8 @@ build:ubuntu_python:
     - python3 -m pip install virtualenv
     - virtualenv venv
     - source venv/bin/activate
+    # Numpy dependency for unit tests
+    - python3 -m pip install numpy
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b764086c8e974dc53aadd345cdd287918d599afb..40d8837f41bdc0d8dfd7eac1c5960064967f1efb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -66,6 +66,10 @@ endif()
 
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
+# -fvisibility=hidden required by pybind11
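+# (PUBLIC so that targets linking against this module inherit the flag;
+# pybind11 compiles extension modules with hidden visibility by default)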
+target_compile_options(${module_name} PUBLIC
+    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
+    -fvisibility=hidden>)
 target_compile_options(${module_name} PRIVATE
     $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
     -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
diff --git a/README.md b/README.md
index 992344a796a4634a25d2127fc49b57adeae45863..5b07e147cb05c2fa1a6d275d567dda218b131996 100644
--- a/README.md
+++ b/README.md
@@ -6,16 +6,19 @@ You can find here the C++ code of the Core library of Aidge.
 
 ## Pip installation
 
-To install aidge_core using pip, make sure to set the desired install path :
-``` bash 
-export AIDGE_INSTALL = '<path_to_aidge>/install'
-```
 
-Then run in your python environnement :
+To install aidge_core using pip, run the following command in your Python environment:
 ``` bash
 pip install . -v
 ```
 
+**Note:** You can specify a custom install folder by setting an environment variable:
+
+``` bash
+export AIDGE_INSTALL='<path_to_aidge>/install'
+```
+
 ## Standard C++ Compilation
 
 Create two directories ``build`` and ``install``.
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index b326e0748c2c77612dd79122fe891a6207d945dc..fc60f52274162155f8f891bf86c22c9a13b241f4 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -30,36 +30,77 @@ class test_operator_binding(unittest.TestCase):
         self.assertNotEqual(gop.name(), "")
 
     def test_param_bool(self):
-        self.generic_operator.add_parameter("bool", True)
-        self.assertEqual(self.generic_operator.get_parameter("bool"), True)
+        self.generic_operator.add_attr("bool", True)
+        self.assertEqual(self.generic_operator.has_attr("bool"), True)
+        self.assertEqual(self.generic_operator.get_attr("bool"), True)
+        self.assertEqual(self.generic_operator.get_attr_type("bool"), "bool")
+        self.assertEqual(self.generic_operator.get_attrs_name(), {"bool"})
+        self.generic_operator.del_attr("bool")
+        self.assertEqual(self.generic_operator.has_attr("bool"), False)
+        self.assertEqual(len(self.generic_operator.get_attrs_name()), 0)
 
     def test_param_int(self):
-        self.generic_operator.add_parameter("int", 1)
-        self.assertEqual(self.generic_operator.get_parameter("int"), 1)
+        self.generic_operator.add_attr("int", 1)
+        self.assertEqual(self.generic_operator.get_attr("int"), 1)
 
     def test_param_float(self):
-        self.generic_operator.add_parameter("float", 2.0)
-        self.assertEqual(self.generic_operator.get_parameter("float"), 2.0)
+        self.generic_operator.add_attr("float", 2.0)
+        self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
 
     def test_param_str(self):
-        self.generic_operator.add_parameter("str", "value")
-        self.assertEqual(self.generic_operator.get_parameter("str"), "value")
+        self.generic_operator.add_attr("str", "value")
+        self.assertEqual(self.generic_operator.get_attr("str"), "value")
 
     def test_param_l_int(self):
-        self.generic_operator.add_parameter("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
-        self.assertEqual(self.generic_operator.get_parameter("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
 
     def test_param_l_bool(self):
-        self.generic_operator.add_parameter("l_bool", [True, False, False, True])
-        self.assertEqual(self.generic_operator.get_parameter("l_bool"), [True, False, False, True])
+        self.generic_operator.add_attr("l_bool", [True, False, False, True])
+        self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
 
     def test_param_l_float(self):
-        self.generic_operator.add_parameter("l_float", [2.0, 1.0])
-        self.assertEqual(self.generic_operator.get_parameter("l_float"), [2.0, 1.0])
+        self.generic_operator.add_attr("l_float", [2.0, 1.0])
+        self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
 
     def test_param_l_str(self):
-        self.generic_operator.add_parameter("l_str", ["ok"])
-        self.assertEqual(self.generic_operator.get_parameter("l_str"), ["ok"])
+        self.generic_operator.add_attr("l_str", ["ok"])
+        self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
+
+    def test_dynamicattribute_binding(self):
+        # Check that the original C++ attributes are bound
+        attrs = aidge_core.test_DynamicAttributes_binding()
+        self.assertEqual(attrs.has_attr("a"), True)
+        self.assertEqual(attrs.get_attr("a"), 42)
+        self.assertEqual(attrs.has_attr("b"), True)
+        self.assertEqual(attrs.get_attr("b"), "test")
+        self.assertEqual(attrs.has_attr("c"), True)
+        self.assertEqual(attrs.get_attr("c"), [True, False, True])
+        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c"})
+        self.assertEqual(attrs.has_attr("d"), False)
+
+        # Add Python attributes
+        attrs.add_attr("d", 18.56)
+        self.assertEqual(attrs.get_attr("d"), 18.56)
+        self.assertEqual(attrs.has_attr("d"), True)
+        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c", "d"})
+        self.assertEqual(attrs.has_attr("e"), False)
+
+        # Check that added Python attribute is accessible in C++
+        # Return the value of an attribute named "d" of type float64 (double in C++)
+        self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 18.56)
+        attrs.set_attr("d", 23.89)
+        self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
+
+    def test_compute_output_dims(self):
+        in_dims=[25, 25]
+        input = aidge_core.Producer(in_dims, name="In")
+        genOp = aidge_core.GenericOperator("genOp", 1, 1, 1, name="genOp")
+        _ = aidge_core.sequential([input, genOp])
+        self.assertListEqual(genOp.get_operator().output(0).dims(), [])
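+        # The callback maps the list of input dims (one list per input) to the
+        # output dims; the identity lambda leaves them unchanged, so output(0)
+        # should take the Producer's dims [25, 25].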
+        genOp.get_operator().set_compute_output_dims(lambda x:x)
+        genOp.get_operator().compute_output_dims()
+        self.assertListEqual(genOp.get_operator().output(0).dims(), in_dims)
 
 if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
+    unittest.main()
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index 02c7598820d2429bc49ff9a2f02c8ee841783173..566650713c36236c19763f466ee906970466c02e 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -11,7 +11,7 @@ SPDX-License-Identifier: EPL-2.0
 import unittest
 import aidge_core
 
-class test_parameters(unittest.TestCase):
+class test_attributes(unittest.TestCase):
     """Very basic test to make sure the python APi is not broken.
     Can be remove in later stage of the developpement.
     """
@@ -27,21 +27,21 @@ class test_parameters(unittest.TestCase):
         out_channels = 8
         k_dims = [2, 2]
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
-        self.assertEqual(conv_op.get("InChannels"), in_channels)
-        self.assertEqual(conv_op.get("OutChannels"), out_channels)
-        self.assertEqual(conv_op.get("KernelDims"), k_dims)
+        self.assertEqual(conv_op.get_attr("InChannels"), in_channels)
+        self.assertEqual(conv_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
 
     def test_fc(self):
         out_channels = 8
         nb_bias = True
         fc_op = aidge_core.FC(out_channels, nb_bias).get_operator()
-        self.assertEqual(fc_op.get("OutChannels"), out_channels)
-        self.assertEqual(fc_op.get("NoBias"), nb_bias)
+        self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_matmul(self):
         out_channels = 8
-        matmul_op = aidge_core.Matmul(out_channels).get_operator()
-        self.assertEqual(matmul_op.get("OutChannels"), out_channels)
+        matmul_op = aidge_core.MatMul(out_channels).get_operator()
+        self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
 
     def test_producer_1D(self):
         dims = [5]
@@ -71,7 +71,7 @@ class test_parameters(unittest.TestCase):
     def test_leaky_relu(self):
         negative_slope = 0.25
         leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
-        self.assertEqual(leakyrelu_op.get("NegativeSlope"), negative_slope)
+        self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipies.py
new file mode 100644
index 0000000000000000000000000000000000000000..754907443530f7e73d1e10ed9549d0c8eb78a011
--- /dev/null
+++ b/aidge_core/unit_tests/test_recipies.py
@@ -0,0 +1,78 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+class test_recipies(unittest.TestCase):
+    """
+    """
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_remove_flatten(self):
+        graph_view = aidge_core.sequential([
+            aidge_core.GenericOperator("Flatten", 1, 1, 1, name="Flatten0"),
+            aidge_core.FC(50, name='0')
+        ])
+        old_nodes = graph_view.get_nodes()
+        aidge_core.remove_flatten(graph_view)
+        self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 1)
+        self.assertTrue("Flatten0" not in [i.name for i in graph_view.get_nodes()])
+
+        self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
+
+    def test_fuse_matmul_add(self):
+        matmul0 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul0")
+        add0 = aidge_core.Add(name="Add0")
+        matmul1 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul1")
+        add1 = aidge_core.Add(name="Add1")
+
+        graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
+
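+        # Attach weight/bias Producers: add_child(other, out_id, in_id) connects
+        # this node's output 0 to input 1 of the target operator.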
+        w0 = aidge_core.Producer([1, 1], name="W0")
+        w0.add_child(matmul0, 0, 1)
+        graph_view.add(w0)
+
+        b0 = aidge_core.Producer([1], name="B0")
+        b0.add_child(add0, 0, 1)
+        graph_view.add(b0)
+
+        w1 = aidge_core.Producer([1, 1], name="W1")
+        w1.add_child(matmul1, 0, 1)
+        graph_view.add(w1)
+
+        b1 = aidge_core.Producer([1], name="B1")
+        b1.add_child(add1, 0, 1)
+        graph_view.add(b1)
+
+        old_nodes = graph_view.get_nodes()
+        aidge_core.fuse_mul_add(graph_view)
+
+        self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 2)
+        self.assertTrue("MatMul0" not in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("Add0" not in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("MatMul1" not in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("Add1" not in [i.name() for i in graph_view.get_nodes()])
+
+        self.assertTrue("W0" in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("B0" in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("W1" in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("B1" in [i.name() for i in graph_view.get_nodes()])
+        # TODO: check that the FC node was correctly created
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
new file mode 100644
index 0000000000000000000000000000000000000000..a214a0e354c64b515d0a7ac24d81c85e116938ca
--- /dev/null
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -0,0 +1,44 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+from functools import reduce
+import numpy as np
+
+class test_tensor(unittest.TestCase):
+    """
+    """
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_getcoord_getidx(self):
+        dims = [2,2,2]
+        size = reduce((lambda x, y: x*y), dims)
+
+        np_array = np.arange(size).reshape(dims)
+
+        t = aidge_core.Tensor(np_array)
+        for i in range(size):
+            coord = t.get_coord(i)
+            idx = t.get_idx(coord)
+            self.assertEqual(idx, i)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index d89c77fe378ce2e1df4c2fa09c8eb509d2c0e735..95115189a22eef3391b504e5f2313f388bd815bd 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -33,16 +33,18 @@
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Matmul.hpp"
+#include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/Scaling.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
-#include "aidge/utils/CParameter.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Recipies.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index d10270b62bb75412a6cbd9203b9b7a3fe220e5aa..453e30a8636d86794c96723350bff615af090e3e 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -14,11 +14,13 @@
 
 #include <cstddef>
 #include <vector>
+#include <memory>
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 class OperatorImpl {
 public:
     virtual void forward(){};
     virtual void backward(){};
 
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index c56f66fc0b827ccccd9749b9880507dbf48c8179..dfe3d932ac68929acfd26ecf7126e07c4707bcfc 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -27,6 +27,9 @@ public:
     {
         printf("Cannot set raw pointer for backend %s\n", mBackend);
     };
+
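+    // Untyped access to the element at flat index idx in the backend buffer.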
+    virtual void* getRaw(std::size_t /*idx*/) = 0;
+
     virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes)
     constexpr const char *backend() const { return mBackend; }
     virtual ~TensorImpl() = default;
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 81b7810a8a548df7e5a2829b1a31cbe337491382..02f4df320d87d1bb02edfa5c11ffe8bc7f560986 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_DATA_H_
 #define AIDGE_DATA_H_
 
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
 
 namespace Aidge {
 enum class DataType {
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index c3a6e478f8943253a9f9b3565db2d4452a9ca133..7422a52eb171ee6dae0e14ad67c0562295fe5d8c 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -446,18 +446,33 @@ class Tensor : public Data,
      */
     bool empty() const { return mDims.empty(); }
 
-    template <typename expectedType, std::array<std::size_t, 1>::size_type DIM>
-    constexpr expectedType &get(std::array<std::size_t, DIM> idx) {
-        assert(DIM == mDims.size());
-        assert(mImpl);
-        std::size_t unfoldedIdx = 0;
-        for (std::size_t i = 0; i < DIM - std::size_t(1); ++i) {
-            unfoldedIdx = (unfoldedIdx + idx[i]) * mDims[i + 1];
-        }
-        unfoldedIdx += idx[DIM - 1];
-        return static_cast<expectedType *>(mImpl->rawPtr())[unfoldedIdx];
+    template <typename expectedType>
+    expectedType& get(std::size_t idx){
+        // TODO: assert that expectedType is compatible with the tensor's DataType
+        // TODO: assert idx < size()
+        return *reinterpret_cast<expectedType *>(mImpl->getRaw(idx));
+    }
+
+    template <typename expectedType>
+    expectedType& get(std::vector<std::size_t> coordIdx){
+        return get<expectedType>(getIdx(coordIdx));
+    }
+
+    template <typename expectedType>
+    void set(std::size_t idx, expectedType value){
+        // TODO: assert that expectedType is compatible with the tensor's DataType
+        // TODO: assert idx < size()
+        void* dataPtr = mImpl->getRaw(idx);
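+        // Copy the raw bytes of value into the buffer at element idx (requires <cstring>).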
+        std::memcpy(dataPtr, &value, sizeof(expectedType));
     }
 
+    template <typename expectedType>
+    void set(std::vector<std::size_t> coordIdx, expectedType value){
+        set<expectedType>(getIdx(coordIdx), value);
+    }
+
     std::string toString() {
         if (dims().empty()) { return "{}"; }
         std::string res;
@@ -559,6 +574,42 @@ class Tensor : public Data,
         return mGrad;
     }
 
+    /**
+     * @brief From the 1D index, return the coordinates of an element in the tensor.
+     *
+     * @param flatIdx 1D index of the value considering a flattened tensor.
+     * @return std::vector<std::size_t>
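+     * @note e.g. with dims {2, 3, 4}, getCoord(17) returns {1, 1, 1}, since 17 = 1*12 + 1*4 + 1.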
+     */
+    std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
+        std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
+        std::size_t idx = flatIdx;
+        for (std::size_t i = mDims.size() - 1; i > 0; --i){
+            coordIdx[i] = (idx % mDims[i]);
+            idx/=mDims[i];
+        }
+        coordIdx[0] = idx % mDims[0];
+        return coordIdx;
+    }
+
+    /**
+     * @brief From the coordinates, return the 1D index of an element in the tensor.
+     *
+     * @param coordIdx Coordinate to an element in the tensor
+     * @return DimSize_t
+     */
+    std::size_t getIdx(std::vector<std::size_t> coordIdx) const {
+        std::size_t flatIdx = 0;
+        assert(coordIdx.size() == mDims.size() && "Coordinates do not match the number of dimensions");
+        std::size_t i = 0;
+        for(; i < mDims.size() - 1; ++i){
+            assert(coordIdx[i] < mDims[i] && "Coordinate is out of bounds for the tensor dimensions");
+            flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1];
+        }
+        return flatIdx + coordIdx[i];
+    }
+
 private:
     ///\bug not protected against overflow
     std::size_t computeSize() {
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 31afeb43a786e81d22d4098b42cc1a5d1b167b98..89ba148497709f0af475bbf953ff285c88036102 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -320,8 +320,20 @@ public:
 
     void link(std::string name1_inID, std::string name2_outID);
 
-    void insert(Node &newNode, Node &inNode, std::initializer_list<Node> outNodes,
-                IOIndex_t tensorIdx);
+    /**
+     * @brief Insert a node (newParentNode) as a parent of the passed node (childNode).
+     * 
+     * @param childNode Node that gets a new parent.
+     * @param newParentNode Inserted Node.
+     * @param childInputTensorIdx Index of the input Tensor for the childNode linked to the inserted Node output.
+     * @param newParentInputTensorIdx Index of the input Tensor for the newParentNode linked to the former parent of childNode.
+     * @param newParentOutputTensorIdx Index of the output Tensor for the newParentNode linked to the childNode's input Tensor.
+     */
+    void insertParent(NodePtr childNode, 
+                        NodePtr newParentNode, 
+                        IOIndex_t childInputTensorIdx, 
+                        IOIndex_t newParentInputTensorIdx, 
+                        IOIndex_t newParentOutputTensorIdx);
 
     /**
      * @brief Replace the current GraphView with the set of given Nodes if possible
@@ -336,6 +348,37 @@ public:
      */
     void updateOutputNodes();
 
+    /**
+     * @brief Clone the GraphView with shared Operators. It is a new GraphView, with cloned Nodes, but the new Nodes refer to the same Operators as the original ones.
+     * @return std::shared_ptr<GraphView>
+     */
+    inline std::shared_ptr<GraphView> cloneSharedOperators() const {
+        return cloneCallback(&Node::cloneSharedOperators);
+    }
+
+    /**
+     * @brief Clone the GraphView with shared Producers. All the other Operators are copied.
+     * @return std::shared_ptr<GraphView>
+     */
+    inline std::shared_ptr<GraphView> cloneSharedProducers() const {
+        return cloneCallback(&Node::cloneSharedProducers);
+    }
+
+    /**
+     * @brief Clone the GraphView. Everything is cloned: Nodes and Operators.
+     * @return std::shared_ptr<GraphView>
+     */
+    inline std::shared_ptr<GraphView> clone() const {
+        return cloneCallback(&Node::clone);
+    }
+
+    /**
+     * @brief Clone the current GraphView using a callback function for Node cloning, which specifies how each
+     * Node should be cloned, replaced by another Node type, or removed (i.e. replaced by identity). When a Node
+     * is removed, the clone() method automatically finds the next valid parent in line, going backward in the
+     * graph, and connects it if that makes sense without ambiguity (effectively treating the removed Node as an
+     * identity operation).
+     * @param cloneNode Callback function to clone a node
+     * @return std::shared_ptr<GraphView>
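+     * @note e.g. clone() is equivalent to cloneCallback(&Node::clone).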
+     */
+    std::shared_ptr<GraphView> cloneCallback(NodePtr(*cloneNode)(NodePtr)) const;
+
 private:
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 340a8318cbd0d59b7710bce7d46b7acd1670dd5b..dbe017fc7f8935e83ff1672392992c75a2e0658c 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -350,6 +350,55 @@ public:
    */
   void resetConnections(bool includeLearnableParam = false);
 
+  ///////////////////////////////////////////////////////
+  //        CLONE
+  ///////////////////////////////////////////////////////
+
+  /**
+   * @brief Clone the current Node. The Operator attribute of the new Node is not copied but shared with the current Node. The new Node has no connection.
+   * @return NodePtr
+   */
+  NodePtr cloneSharedOperators() const;
+
+  /**
+   * @brief Clone the Node. Every attribute is copied and the Operator is cloned too, except for Producers, whose Operator is shared. The new Node has no connection.
+   * @return NodePtr
+   */
+  NodePtr cloneSharedProducers() const;
+
+  /**
+   * @brief Clone the Node and its Operator. The new Node has no connection.
+   * @return NodePtr
+   */
+  NodePtr clone() const;
+
+  /**
+   * @brief Callback function to clone the Node keeping the same Operator object instance. The new Node has no connection.
+   * @param node Node to clone.
+   * @return NodePtr
+   */
+  static NodePtr cloneSharedOperators(NodePtr node) {
+    return node->cloneSharedOperators();
+  }
+
+  /**
+   * @brief Callback function to clone the Node. Every attribute is copied and the Operator is cloned too, except for Producers, whose Operator is shared. The new Node has no connection.
+   * @param node Node to clone.
+   * @return NodePtr
+   */
+  static NodePtr cloneSharedProducers(NodePtr node) {
+    return node->cloneSharedProducers();
+  }
+
+  /**
+   * @brief Callback function to clone the Node and its Operator. The new Node has no connection.
+   * @param node Node to clone.
+   * @return NodePtr
+   */
+  static NodePtr clone(NodePtr node) {
+    return node->clone();
+  }
+
 private:
   ///////////////////////////////////////////////////////
   //        OPERATORS
diff --git a/include/aidge/hook/ExecTime.hpp b/include/aidge/hook/ExecTime.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..212fef58696be702e89c8ad973dcc0dd0fc389ae
--- /dev/null
+++ b/include/aidge/hook/ExecTime.hpp
@@ -0,0 +1,59 @@
+/**
+ * \file ExecTime.hpp
+ * \brief ExecTime structure
+ * \version file 1.0.0
+ * \date Creation 27 June 2023
+ * \date 27 June 2023
+ * \par ChangeLog
+ * \par
+ *  v1.0.0, 27 June 2023<br>
+ *  - Initial version.
+ * \author mn271187, ik243221
+ * \copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+ *  rights reserved.
+ */
+
+#ifndef AIDGE_CORE_HOOK_EXECTIME_H_
+#define AIDGE_CORE_HOOK_EXECTIME_H_
+
+#include "aidge/operator/Operator.hpp"
+#include "aidge/hook/hook.hpp"
+#include <memory>
+#include <chrono>
+#include <vector>
+
+namespace Aidge {
+
+class ExecTime : public Hook {
+private:
+    std::vector<std::chrono::high_resolution_clock::time_point> registeredTimes;
+public:
+    ExecTime(const std::shared_ptr<Operator> op) : Hook(op) {}
+    ~ExecTime() = default;
+
+    void call() override final {
+        registeredTimes.push_back(std::chrono::high_resolution_clock::now());
+    }
+
+    static std::shared_ptr<ExecTime> create(const std::shared_ptr<Operator> op)
+    {
+        return std::make_shared<ExecTime>(op);
+    }
+
+    std::vector<std::chrono::high_resolution_clock::time_point> getTimes() {
+        return  registeredTimes;
+    }
+
+    std::chrono::high_resolution_clock::time_point getTime(size_t idx) {
+        return registeredTimes[idx];
+    }
+
+};
+
+namespace {
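+    // Self-registration: make this hook creatable by name through the Hook Registrar.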
+    static Registrar<Hook> registrarHook_ExecTime({"execution_time"}, Aidge::ExecTime::create);
+}
+}
+
+#endif /* AIDGE_CORE_HOOK_EXECTIME_H_ */
\ No newline at end of file
diff --git a/include/aidge/hook/Hook.hpp b/include/aidge/hook/Hook.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..28f7ef5cddbc649af50209ba77527b8b75d731b7
--- /dev/null
+++ b/include/aidge/hook/Hook.hpp
@@ -0,0 +1,41 @@
+/**
+ * \file Hook.hpp
+ * \brief Hook structure
+ * \version file 1.0.0
+ * \date Creation 27 June 2023
+ * \date 27 June 2023
+ * \par ChangeLog
+ * \par
+ *  v1.0.0, 27 June 2023<br>
+ *  - Initial version.
+ * \author mn271187, ik243221
+ * \copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+ *  rights reserved.
+ */
+
+#ifndef AIDGE_CORE_HOOK_HOOK_H_
+#define AIDGE_CORE_HOOK_HOOK_H_
+
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include <memory>
+
+namespace Aidge {
+
+class Operator;
+class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)> {
+protected:
+    const std::shared_ptr<Operator> mOperator;
+
+public:
+    Hook(std::shared_ptr<Operator> op) : mOperator(op) {}
+    virtual ~Hook() = default;
+
+    virtual void call() = 0;
+
+};
+}
+
+#endif /* AIDGE_CORE_HOOK_HOOK_H_ */
\ No newline at end of file
diff --git a/include/aidge/hook/OutputRange.hpp b/include/aidge/hook/OutputRange.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a2da2a997d594c0ef78fb7c31f33b32c3495c4eb
--- /dev/null
+++ b/include/aidge/hook/OutputRange.hpp
@@ -0,0 +1,74 @@
+/**
+ * \file OutputRange.hpp
+ * \brief OutputRange structure
+ * \version file 1.0.0
+ * \date Creation 27 June 2023
+ * \date 27 June 2023
+ * \par ChangeLog
+ * \par
+ *  v1.0.0, 27 June 2023<br>
+ *  - Initial version.
+ * \author ik243221
+ * \copyright
+ *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
+ *  rights reserved.
+ */
+
+#ifndef AIDGE_CORE_HOOK_OUTPUTRANGE_H_
+#define AIDGE_CORE_HOOK_OUTPUTRANGE_H_
+
+#include "aidge/operator/Operator.hpp"
+#include "aidge/hook/hook.hpp"
+#include <memory>
+#include <chrono>
+#include <vector>
+#include <cmath>
+namespace Aidge {
+
+class OutputRange : public Hook {
+private:
+    std::vector<float> registeredOutputs;
+public:
+    OutputRange(const std::shared_ptr<Operator> op) : Hook(op) {}
+    ~OutputRange() = default;
+
+    void call() override final {
+        //std::cout << "call() outputRange hook " << std::endl;
+        //this assumes there is only 1 output possible
+        std::shared_ptr<Tensor> tensor = mOperator->getOutput(0);
+        float max_value = 0.;
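+        // Assumption: the output tensor holds float32 data (rawPtr() is cast to float*).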
+        float * casted_tensor = static_cast<float *>(tensor->getImpl()->rawPtr());
+        // Find the absolute max value in the tensor and save it to the registered outputs.
+        for(std::size_t i = 0; i < tensor->size(); ++i) {
+            if(std::abs(casted_tensor[i]) > max_value){
+                max_value = std::abs(casted_tensor[i]);
+            }
+        }
+        registeredOutputs.push_back(max_value);
+    }
+
+    static std::shared_ptr<OutputRange> create(const std::shared_ptr<Operator> op)
+    {
+        return std::make_shared<OutputRange>(op);
+    }
+
+    std::vector<float> getOutputs() {
+        return registeredOutputs;
+    }
+
+    float getOutput(size_t idx) {
+        return registeredOutputs[idx];
+    }
+
+};
+
+namespace {
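+    // Self-registration under the name "output_range".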
+    static Registrar<Hook> registrarHook_OutputRange({"output_range"}, Aidge::OutputRange::create);
+}
+}
+
+#endif /* AIDGE_CORE_HOOK_OUTPUTRANGE_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index ff3d1888c3bc70b61a3d4da42908d40de2d1d73e..1e0f17e6db9278e7edf2a11918472c084561a308 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -32,14 +32,13 @@ class Add_Op : public Operator,
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, NUM> mInputs;
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>(shared_from_this());
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char* Type = "Add";
 
     constexpr Add_Op()
-            : Operator(Type),
-            mOutput(std::make_shared<Tensor>())
+            : Operator(Type)
     {
         assert(NUM > 0 && "Add should have at least one input");
         for (std::size_t i = 0; i<NUM; ++i) {
@@ -48,6 +47,31 @@ public:
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Add_Op(const Add_Op<NUM>& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        assert(NUM > 0 && "Add should have at least one input");
+        for (std::size_t i = 0; i<NUM; ++i) {
+            mInputs[i] = std::make_shared<Tensor>();
+        }
+        setDatatype(op.mOutput->dataType());
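+        // Re-create the backend implementation for this copy, if the source operator had one.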
+        mImpl = op.mImpl ? Registrar<Add_Op<NUM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Add_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Add_Op>(*this);
+    }
+
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
     //         (strcmp(inputName, "weight") ? mInputs[1] :
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 8bf5e21d116c8f20d49630bb60bb6579e2765ea7..5e187def3d26c8b931c408f11951d2b3782e7047 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class AvgPoolingParam { StrideDims, KernelDims };
+enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public Operator,
                 public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
-                public Parameterizable<AvgPoolingParam,
+                public StaticAttributes<AvgPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>> {
 private:
@@ -44,20 +44,42 @@ public:
 
     AvgPooling_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<AvgPoolingParam,
+    using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
-    template <AvgPoolingParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template <AvgPoolingAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameterizable_(param<AvgPoolingParam::StrideDims>(stride_dims),
-                           param<AvgPoolingParam::KernelDims>(kernel_dims)) {
+          Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
+                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    AvgPooling_Op(const AvgPooling_Op<DIM>& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::AvgPooling_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<AvgPooling_Op<DIM>>(*this);
+    }
+
     constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 1 && "operator supports only 1 input");
         (void) inputIdx; // avoid unused warning
@@ -70,11 +92,11 @@ public:
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
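+            // Output size without padding: outDim = 1 + floor((inDim - kernelDim) / strideDim)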
 
-            for (std::size_t dim = 0; dim < this->template get<AvgPoolingParam::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             std::floor(static_cast<float>(mInput->dims()[dim+2] -
-                                                                    this->template get<AvgPoolingParam::KernelDims>()[dim]) /
-                                            static_cast<float>(this->template get<AvgPoolingParam::StrideDims>()[dim])));
+                                                                    this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
+                                            static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -159,7 +181,7 @@ inline std::shared_ptr<Node> AvgPooling(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::AvgPoolingParam>::data[] = {"StrideDims",
+const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                           "KernelDims"};
 }
 
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 6861c1359737f3f344f0c7d9b2d12c9ff35b88ad..90a6be7222ee1b3e377520f2bc612a72c2ba4ab3 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class BatchNormParam { Epsilon, Momentum };
+enum class BatchNormAttr { Epsilon, Momentum };
 
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public Operator,
                 public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
-                public Parameterizable<BatchNormParam, float, float> {
+                public StaticAttributes<BatchNormAttr, float, float> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
@@ -44,18 +44,40 @@ public:
 
     BatchNorm_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<BatchNormParam, float, float>;
-    template <BatchNormParam e>
-    using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
+    template <BatchNormAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
         : Operator(Type),
-          Parameterizable_(param<BatchNormParam::Epsilon>(epsilon),
-                           param<BatchNormParam::Momentum>(momentum)),
+          Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
+                      attr<BatchNormAttr::Momentum>(momentum)),
           mOutput(std::make_shared<Tensor>()) {
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    BatchNorm_Op(const BatchNorm_Op<DIM>& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::BatchNorm_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<BatchNorm_Op<DIM>>(*this);
+    }
+
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
     //         (strcmp(inputName, "weight") ? mInputs[1] :
@@ -76,7 +98,6 @@ public:
         if (!mInputs[0]->empty()) {
             for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
                 if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
-                    assert(!mInputs[0]->hasImpl() && "Incompatible size with already implemented learnable parameter");
                     mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
                 }
             }
@@ -157,7 +178,7 @@ inline std::shared_ptr<Node> BatchNorm(const float epsilon = 1.0e-5F,
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::BatchNormParam>::data[] = { "Epsilon", "Momentum" };
+const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
 
-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 69bd8cd5b38eb728907f8ccfb2d6da928cbee8e9..30baa6166c3c9e711c7c7a77b5ab523761249a65 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvParam { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
+enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public Operator,
                 public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
+                public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
                                        DimSize_t, std::array<DimSize_t, DIM>> {
 public:
     // FIXME: change accessibility
@@ -44,10 +44,10 @@ public:
 
     Conv_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
+    using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
                                              DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
-    template <ConvParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template <ConvAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr Conv_Op(DimSize_t in_channels,
                       DimSize_t out_channels,
@@ -55,14 +55,36 @@ public:
                       const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameterizable_(param<ConvParam::StrideDims>(stride_dims),
-                           param<ConvParam::DilationDims>(dilation_dims),
-                           param<ConvParam::InChannels>(in_channels),
-                           param<ConvParam::OutChannels>(out_channels),
-                           param<ConvParam::KernelDims>(kernel_dims)) {
+          Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
+                      attr<ConvAttr::DilationDims>(dilation_dims),
+                      attr<ConvAttr::InChannels>(in_channels),
+                      attr<ConvAttr::OutChannels>(out_channels),
+                      attr<ConvAttr::KernelDims>(kernel_dims)) {
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Conv_Op(const Conv_Op<DIM>& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Conv_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Conv_Op<DIM>>(*this);
+    }
+
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
     //         (strcmp(inputName, "weight") ? mInputs[1] :
@@ -87,17 +109,17 @@ public:
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
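+            // Output size without padding: outDim = 1 + floor((inDim - kernelExtent) / strideDim),
+            // where kernelExtent = dilation * (kernelDim - 1) + 1.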
 
-            for (std::size_t dim = 0; dim < this->template get<ConvParam::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template get<ConvParam::DilationDims>()[dim] *
-                                                       (this->template get<ConvParam::KernelDims>()[dim] - 1) +
+            for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+                const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                       (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                         floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template get<ConvParam::StrideDims>()[dim])));
+                              static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
             }
 
-            outputDims[1] = this->template get<ConvParam::OutChannels>();
+            outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
             outputDims[0] = mInputs[0]->dims()[0];
             mOutput->resize(outputDims);
         }
@@ -188,8 +210,13 @@ inline std::shared_ptr<Node> Conv(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "DilationDims", "InChannels", "OutChannels",
-                                                          "KernelDims"};
+const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
+    "StrideDims",
+    "DilationDims",
+    "InChannels",
+    "OutChannels",
+    "KernelDims"
+};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 9519cb2f5425162c3f7178422fc4e99edfb2751b..c1d36eaeda2837db90780958e9d6374bed97674c 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseParam { StrideDims, DilationDims, Channels, KernelDims };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public Operator,
                 public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
-                public Parameterizable<ConvDepthWiseParam,
+                public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        DimSize_t,
@@ -47,25 +47,47 @@ class ConvDepthWise_Op : public Operator,
 
     ConvDepthWise_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<ConvDepthWiseParam,
+    using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              DimSize_t,
                                              std::array<DimSize_t, DIM>>;
-    template <ConvDepthWiseParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template <ConvDepthWiseAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims),
-                           param<ConvDepthWiseParam::DilationDims>(dilation_dims),
-                           param<ConvDepthWiseParam::Channels>(0),
-                           param<ConvDepthWiseParam::KernelDims>(kernel_dims)) {
+          Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
+                      attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+                      attr<ConvDepthWiseAttr::Channels>(0),
+                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ConvDepthWise_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
+    }
+
     constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 3 && "operator supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
@@ -77,17 +99,17 @@ class ConvDepthWise_Op : public Operator,
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<ConvDepthWiseParam::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template get<ConvDepthWiseParam::DilationDims>()[dim] *
-                                                       (this->template get<ConvDepthWiseParam::KernelDims>()[dim] - 1) +
+            for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+                const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                       (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                         floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template get<ConvDepthWiseParam::StrideDims>()[dim])));
+                              static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
             }
-            this->template get<ConvDepthWiseParam::Channels>() = mInputs[0]->dims()[1];
-            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template get<ConvDepthWiseParam::KernelDims>()));
+            this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
+            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
             // if (mInputs[1]->empty()) {
             //     mInputs[1]->resize(weightDims);
             // }
@@ -182,7 +204,7 @@ inline std::shared_ptr<Node> ConvDepthWise(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseParam>::data[] = {"StrideDims", "DilationDims", "Channels",
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
                                                           "KernelDims"};
 }
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index db92dc9c735416d250fa32e2f9010b21b8f808c0..127d39a8bdfdd233cdac9e1ca6cf0bf85f656d16 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -23,17 +23,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class FCParam { OutChannels, NoBias };
+enum class FCAttr { OutChannels, NoBias };
 
 class FC_Op : public Operator,
               public Registrable<FC_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const FC_Op &)>,
-              public Parameterizable<FCParam, DimSize_t, bool> {
+              public StaticAttributes<FCAttr, DimSize_t, bool> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
@@ -44,24 +44,45 @@ public:
 
     FC_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<FCParam, DimSize_t, bool>;
-    template <FCParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<FCAttr, DimSize_t, bool>;
+    template <FCAttr e> using attr = typename Attributes_::template attr<e>;
 
     FC_Op(DimSize_t out_channels, bool noBias)
             : Operator(Type),
-            Parameterizable_(
-                param<FCParam::OutChannels>(out_channels),
-                param<FCParam::NoBias>(noBias)),
-            mOutput(std::make_shared<Tensor>())
+            Attributes_(
+                attr<FCAttr::OutChannels>(out_channels),
+                attr<FCAttr::NoBias>(noBias))
     {
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    FC_Op(const FC_Op& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::FC_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<FC_Op>(*this);
+    }
+
     void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operator supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         if (inputIdx == 2) {
-            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template get<FCParam::NoBias>()) == false ? static_cast<std::size_t>(this->template get<FCParam::OutChannels>()) : 0));
+            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0));
             assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
         }
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
@@ -72,9 +93,9 @@ public:
     void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {this->template get<FCParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
+            std::array<DimSize_t, 2> weightDims = {this->template getAttr<FCAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
             // <out_channels, batch>
-            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()};
+            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<FCAttr::OutChannels>()};
 
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
@@ -150,7 +171,7 @@ inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, con
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels",
+const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index dab5df9a8f2d1e7d2cd680703d70e38d564c2564..1e51866177acf80441f236070aea9dee6145bc19 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -16,24 +16,28 @@
 #include <vector>
 #include <string>
 #include <cassert>
+#include <cstring>
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/CParameter.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 class GenericOperator_Op
     : public Operator,
-      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
+      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
+      public DynamicAttributes {
    private:
-    CParameter mParams;
+    using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
+
     IOIndex_t mNbDataIn;
     IOIndex_t mNbIn;
     IOIndex_t mNbOut;
     std::vector<std::shared_ptr<Tensor>> mInputs;
     std::vector<std::shared_ptr<Tensor>> mOutputs;
+    ComputeDimsFunc mComputeOutputDims;
 
    public:
     GenericOperator_Op(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut)
@@ -50,52 +54,76 @@ class GenericOperator_Op
     }
 
     /**
-     * @brief Get the Parameter object identified by its name.
-     * @tparam T expected parameter type.
-     * @param key Parameter name.
-     * @details assert if T is not the actual parameter type, if the parameter
-     * does not exist or internal parameter position is invalid.
-     * @todo Returning a T const& ? But dangerous => may get an address within
-     * param buffer that will get invalid after the CParam death.
-     * @note at() throws if the parameter does not exist, using find to test
-     * for parameter existance
-     * @return template<class T> The parameter.
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
      */
-    template <class T>
-    T getParameter(std::string const &key) const {
-        return mParams.Get<T>(key);
+    GenericOperator_Op(const GenericOperator_Op& op)
+        : Operator(op.type().c_str()), mNbDataIn(op.mNbDataIn), mNbIn(op.mNbIn), mNbOut(op.mNbOut)
+    {
+        // cpy-ctor
+        mInputs = std::vector<std::shared_ptr<Tensor>>(mNbIn);
+        for (std::size_t i = 0; i < mNbIn; ++i) {
+            mInputs[i] = std::make_shared<Tensor>();
+        }
+        mOutputs = std::vector<std::shared_ptr<Tensor>>(mNbOut);
+        for (std::size_t i = 0; i < mNbOut; ++i) {
+            mOutputs[i] = std::make_shared<Tensor>(*op.mOutputs[i]);
+        }
     }
 
-    ///\brief Add a parameter value, identified by its name
-    ///\tparam T expected parameter type
-    ///\param i_ParamName Parameter name
-    ///\param i_Value Parameter value
-    ///\todo Pass i_Value by ref if large or not trivial
-    ///\bug If parameter already exists, its value is changed but written in the
-    /// internal buffer in a new location (previous value is still in memory at
-    /// its previous location)
-    template <class T>
-    void addParameter(std::string const &key, T const &value) {
-        mParams.Add<T>(key, value);
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::GenericOperator_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<GenericOperator_Op>(*this);
     }
 
+    // Helper functions that can be used with setComputeOutputDims():
+    static const ComputeDimsFunc Identity;
 
-    std::string getParameterType(std::string const &key) { return mParams.getParamType(key); }
-
-    std::vector<std::string> getParametersName() { return mParams.getParametersName(); }
+    void setComputeOutputDims(ComputeDimsFunc func) {
+        mComputeOutputDims = func;
+    }
 
     // Override virtual Operator methods
-    void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
-        printf("Info: using associateInput() on a GenericOperator.\n");
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < mNbIn && "input index out of bounds for this operator");
+
+        if (strcmp(data->type(), Tensor::Type) == 0) {
+            // TODO: associate input only if of type Tensor, otherwise do nothing
+            mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+        }
     }
 
     void computeOutputDims() override final {
-        assert(false && "Cannot compute output dim of a GenericOperator");
+        if (mComputeOutputDims) {
+            std::vector<std::vector<size_t>> inputsDims(mNbIn, std::vector<size_t>());
+            for (std::size_t i = 0; i < mNbIn; ++i) {
+                if (mInputs[i]) {
+                    inputsDims[i] = mInputs[i]->dims();
+                }
+            }
+
+            const auto& outputsDims = mComputeOutputDims(inputsDims);
+            assert(outputsDims.size() == mNbOut && "The provided ComputeDimsFunc function returns the wrong number of outputs");
+            for (std::size_t i = 0; i < mNbOut; ++i) {
+                mOutputs[i]->resize(outputsDims[i]);
+            }
+        }
+        else {
+            assert(false && "Cannot compute output dim of a GenericOperator");
+        }
     }
 
     bool outputDimsForwarded() const override final {
-        assert(false && "GenericOperator cannot forward dims");
-        return false;
+        if (mComputeOutputDims) {
+            return !(mOutputs[0]->empty());
+        }
+        else {
+            assert(false && "GenericOperator cannot forward dims");
+            return false;
+        }
     }
 
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
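
setComputeOutputDims() turns what used to be a hard failure into an opt-in mechanism: a GenericOperator_Op can now forward dimensions when given a callback mapping the input shapes to the output shapes, and computeOutputDims() checks that the callback returns exactly mNbOut shapes. A hedged sketch using only the API shown above:

    #include <cstddef>
    #include <vector>
    #include "aidge/operator/GenericOperator.hpp"

    void genericDimsSketch() {
        // A generic op with 1 data input, 1 input in total, 1 output.
        Aidge::GenericOperator_Op op("MyCustomOp", 1, 1, 1);

        // Forward the input shape unchanged; this is equivalent to the
        // provided GenericOperator_Op::Identity helper.
        op.setComputeOutputDims(
            [](const std::vector<std::vector<std::size_t>>& inputsDims) {
                return std::vector<std::vector<std::size_t>>{inputsDims[0]};
            });
    }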
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 1dff2550a42245351afab5b8bb1a708a8d0d8c0b..c6ee01239e1ed065587276c1891d26ba3899fe89 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -15,7 +15,7 @@
 #include <vector>
 #include <memory>
 
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
@@ -25,13 +25,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class LeakyReLUParam {
+enum class LeakyReLUAttr {
     NegativeSlope
 };
 
 class LeakyReLU_Op : public Operator,
     public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
-    public Parameterizable<LeakyReLUParam, float> {
+    public StaticAttributes<LeakyReLUAttr, float> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -42,17 +42,39 @@ public:
 
     LeakyReLU_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<LeakyReLUParam, float>;
-    template <LeakyReLUParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
+    template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
 
     LeakyReLU_Op(float negativeSlope)
             : Operator(Type),
-            Parameterizable_(
-                param<LeakyReLUParam::NegativeSlope>(negativeSlope))
+            Attributes_(
+                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
     {
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    LeakyReLU_Op(const LeakyReLU_Op& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::LeakyReLU_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<LeakyReLU_Op>(*this);
+    }
+
     void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
         (void) inputIdx; // avoid unused warning
@@ -125,7 +147,7 @@ inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::st
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::LeakyReLUParam>::data[]
+const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
     = {"NegativeSlope"};
 }
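
Beyond the type renames, the accessor spelling changes systematically across this merge request: param<e> becomes attr<e> and get<e>() becomes getAttr<e>(). A minimal sketch of the renamed accessor from calling code (assuming getAttr is public, as its use throughout this diff suggests):

    #include "aidge/operator/LeakyReLU.hpp"

    void attrRenameSketch() {
        Aidge::LeakyReLU_Op op(/*negativeSlope=*/0.01f);

        // Formerly: op.get<LeakyReLUParam::NegativeSlope>()
        const float slope = op.getAttr<Aidge::LeakyReLUAttr::NegativeSlope>();
        (void) slope; // avoid unused warning
    }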
 
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/MatMul.hpp
similarity index 66%
rename from include/aidge/operator/Matmul.hpp
rename to include/aidge/operator/MatMul.hpp
index 639b366912060b3e085510f312d94568e6b65f03..d0dadd847a59c9d2a1c0dd97f2f200437da71863 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -23,38 +23,59 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class MatmulParam { OutChannels };
+enum class MatMulAttr { OutChannels };
 
-class Matmul_Op : public Operator,
-              public Registrable<Matmul_Op,
+class MatMul_Op : public Operator,
+              public Registrable<MatMul_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const Matmul_Op &)>,
-              public Parameterizable<MatmulParam, DimSize_t> {
+                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
+              public StaticAttributes<MatMulAttr, DimSize_t> {
 public:
     std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
     const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
-    static constexpr const char* Type = "Matmul";
+    static constexpr const char* Type = "MatMul";
 
-    Matmul_Op() = delete;
+    MatMul_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<MatmulParam, DimSize_t>;
-    template <MatmulParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<MatMulAttr, DimSize_t>;
+    template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
 
-    Matmul_Op(DimSize_t out_channels)
+    MatMul_Op(DimSize_t out_channels)
             : Operator(Type),
-            Parameterizable_(
-                param<MatmulParam::OutChannels>(out_channels)),
-            mOutput(std::make_shared<Tensor>())
+            Attributes_(
+                attr<MatMulAttr::OutChannels>(out_channels))
     {
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    MatMul_Op(const MatMul_Op& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::MatMul_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<MatMul_Op>(*this);
+    }
+
     void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 2 && "operators supports only 2 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
@@ -64,9 +85,9 @@ public:
     void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template get<MatmulParam::OutChannels>()};
+            std::array<DimSize_t, 2> weightDims = {this->template getAttr<MatMulAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
             // <out_channels, batch>
-            std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()};
+            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()};
 
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
@@ -107,7 +128,7 @@ public:
 
 
     void setBackend(const std::string& name) {
-        mImpl = Registrar<Matmul_Op>::create(name)(*this);
+        mImpl = Registrar<MatMul_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
         // FIXME: temporary workaround
@@ -129,17 +150,17 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
-    addProducer(matmul, 1, {1, out_channels}, "w");
+inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
+    // FIXME: properly handle default w initialization in every cases
+    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
+    addProducer(matmul, 1, {out_channels, 1}, "w");
     return matmul;
 }
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
+const char *const EnumStrings<Aidge::MatMulAttr>::data[] = {"OutChannels"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
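
The computeOutputDims() change above is a behavioral fix, not just a rename: the weight is now laid out as {out_channels, in_features} and the output keeps the batch dimension instead of collapsing to rank 1, matching FC. A worked example of the corrected bookkeeping, where in_features stands for mInputs[0]->sizeM1() (read here as the per-sample element count, by analogy with the FC code):

    // input  : {batch, in_features}         e.g. {8, 64}
    // weight : {out_channels, in_features}  e.g. {10, 64}  (was {input size, out_channels})
    // output : {batch, out_channels}        e.g. {8, 10}   (was rank-1 {out_channels})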
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 80378fd2b6a66073f2573bbd02e6b90f8577e971..2da3a1268da694e290e4650071e1153f1be4a9b8 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class MaxPoolingParam { StrideDims, KernelDims };
+enum class MaxPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public Operator,
                 public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
-                public Parameterizable<MaxPoolingParam,
+                public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>> {
 private:
@@ -44,21 +44,43 @@ public:
 
     MaxPooling_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<MaxPoolingParam,
+    using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
-    template <MaxPoolingParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template <MaxPoolingAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
-                           param<MaxPoolingParam::KernelDims>(kernel_dims)),
+          Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
+                      attr<MaxPoolingAttr::KernelDims>(kernel_dims)),
           mOutput(std::make_shared<Tensor>()) {
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    MaxPooling_Op(const MaxPooling_Op<DIM>& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::MaxPooling_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<MaxPooling_Op<DIM>>(*this);
+    }
+
     constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 1 && "operators supports only 3 inputs");
         (void) inputIdx; // avoid unused warning
@@ -71,11 +93,11 @@ public:
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             std::floor(static_cast<float>(mInput->dims()[dim+2] -
-                                                                    this->template get<MaxPoolingParam::KernelDims>()[dim]) /
-                                            static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
+                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
+                                            static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -160,7 +182,7 @@ inline std::shared_ptr<Node> MaxPooling(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims"};
+const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
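
The renamed accessors leave the arithmetic itself untouched: for each spatial dimension the loop above applies the usual no-padding pooling formula, while batch and channel dimensions pass through. A quick numeric check:

    // out_dim = 1 + floor((in_dim - kernel_dim) / stride_dim)
    // e.g. in_dim = 32, kernel = 2, stride = 2  ->  1 + floor(30 / 2) = 16
    // outputDims[0] (batch) and outputDims[1] (channels) are copied from the input.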
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index b651902c3b30312a303b86947981c846c0ffc5dc..b45c0ae8a2dc3af37d57b56be0498062d104bc51 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -29,8 +29,6 @@ public:
     std::shared_ptr<SequentialScheduler> mScheduler;
 
    public:
-    MetaOperator_Op() = delete;
-
     MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph)
         : Operator(type),
           mGraph(graph)
@@ -45,6 +43,25 @@ public:
         }
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    MetaOperator_Op(const MetaOperator_Op& op)
+        : Operator(op.type().c_str()),
+          mGraph(op.mGraph->clone())
+    {
+        // cpy-ctor
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::MetaOperator_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<MetaOperator_Op>(*this);
+    }
+
     const std::shared_ptr<GraphView>& getMicroGraph() const {
         return mGraph;
     }
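
Unlike the tensor-level operators, the meta-operator copy-constructor deep-clones its inner GraphView, so a cloned meta-operator owns fresh copies of the wrapped nodes rather than sharing them. Sketch:

    #include <memory>
    #include "aidge/operator/MetaOperator.hpp"

    std::shared_ptr<Aidge::Operator> metaCloneSketch(const Aidge::MetaOperator_Op& metaOp) {
        // The copy goes through mGraph->clone(): the new operator's
        // micro-graph is independent from the original's nodes.
        return metaOp.clone();
    }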
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 3c0e66221349228bf7e889fca783820a4a9da50e..e3544171de9b97a2795f1d936adfeff341bd32dc 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -20,12 +20,14 @@
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/hook/Hook.hpp"
 
 namespace Aidge {
 
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
   std::unique_ptr<OperatorImpl> mImpl; // implementation of the operator
+  std::map<std::string, std::shared_ptr<Hook>> mHooks;
 
 private:
   std::string mType;
@@ -33,8 +35,18 @@ private:
 public:
   Operator() = delete;
   Operator(const char* type) : mType(type) {}
+  virtual std::shared_ptr<Operator> clone() const = 0;
   virtual ~Operator();
 
+  Operator(const Operator& op):
+    std::enable_shared_from_this<Operator>()
+  {
+    mType = op.mType;
+    mImpl = nullptr;
+    // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
+    // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
+    // Hooks are not copied.
+  }
 
 public:
 
@@ -48,6 +60,15 @@ public:
     virtual std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const = 0;
     virtual Tensor& output(const IOIndex_t /*outputIdx*/) const = 0;
 
+    std::shared_ptr<Hook> getHook(std::string hookName) {
+        return mHooks[hookName];
+    }
+    void addHook(std::string hookName) {
+        mHooks.insert(std::pair<std::string, std::shared_ptr<Hook>>(hookName,Registrar<Hook>::create({hookName})(shared_from_this())));
+    }
+
+    void runHooks() const;
+
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
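
Two additions meet in Operator.hpp: the pure virtual clone() backed by a base copy-constructor that deliberately resets mImpl and drops hooks, and a small named-hook registry. A hedged sketch of the hook API, where "my_hook" is a hypothetical key under which some Hook subclass would have been registered in Registrar<Hook>:

    #include <memory>
    #include "aidge/operator/Operator.hpp"

    void hookSketch(std::shared_ptr<Aidge::Operator> op) {
        // addHook() calls shared_from_this(), so op must already be
        // owned by a shared_ptr.
        op->addHook("my_hook");
        std::shared_ptr<Aidge::Hook> hook = op->getHook("my_hook");
        (void) hook; // avoid unused warning

        // Neither hooks nor mImpl survive op->clone(): the base
        // copy-constructor resets both, and each concrete copy-constructor
        // recreates only the implementation.
    }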
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 4431c74628fac4c62427d16a3e1fbb00e3d15207..53ce15b98b26b24d8c470927c4bd6f3200914b46 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -21,20 +21,20 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class PadParam { BeginEndTuples, Type, Value };
-enum class PadParamType { Constant, Replicate, Reflect, Wrap };
+enum class PadAttr { BeginEndTuples, Type, Value };
+enum class PadBorderType { Constant, Replicate, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public Operator,
                 public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
-                public Parameterizable<PadParam,
+                public StaticAttributes<PadAttr,
                                        std::array<std::array<DimSize_t, 2>, DIM>,
-                                       PadParamType,
+                                       PadBorderType,
                                        double> {
 private:
     // FIXME: change accessibility
@@ -46,30 +46,30 @@ public:
 
     Pad_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<PadParam,
+    using Attributes_ = StaticAttributes<PadAttr,
                                              std::array<std::array<DimSize_t, 2>, DIM>,
-                                             PadParamType,
+                                             PadBorderType,
                                              double>;
-    template <PadParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template <PadAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr Pad_Op(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
-                     const PadParamType &type = PadParamType::Constant,
+                     const PadBorderType &type = PadBorderType::Constant,
                      double value = 0.0)
         : Operator(Type),
-          Parameterizable_(param<PadParam::BeginEndTuples>(beginEndTuples),
-                           param<PadParam::Type>(type),
-                           param<PadParam::Value>(value)) {
+          Attributes_(attr<PadAttr::BeginEndTuples>(beginEndTuples),
+                           attr<PadAttr::Type>(type),
+                           attr<PadAttr::Value>(value)) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Pad_Op(const Pad_Op& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -79,10 +79,9 @@ public:
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pad_Op
-     * @param op Operator to copy.
      */
-    Operator* clone() const /*override*/ {  // TODO: FIXME: after merge with clone branch
-        return new Pad_Op<DIM>(*static_cast<const Pad_Op<DIM>*>(this));
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Pad_Op<DIM>>(*this);
     }
 
     constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
@@ -98,9 +97,9 @@ public:
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
             for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = this->template get<PadParam::BeginEndTuples>()[dim][0]
+                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndTuples>()[dim][0]
                                     + mInput->dims()[dim+2]
-                                    + this->template get<PadParam::BeginEndTuples>()[dim][1];
+                                    + this->template getAttr<PadAttr::BeginEndTuples>()[dim][1];
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -166,7 +165,7 @@ public:
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> Pad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
                                            const std::string& name = "",
-                                           const PadParamType &type = PadParamType::Constant,
+                                           const PadBorderType &type = PadBorderType::Constant,
                                            double value = 0.0)
 {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
@@ -177,7 +176,7 @@ inline std::shared_ptr<Node> Pad(const std::array<std::array<DimSize_t, 2>, DIM>
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, DIM> &dimBeginEnd,
                                            const std::string& name = "",
-                                           const PadParamType &type = PadParamType::Constant,
+                                           const PadBorderType &type = PadBorderType::Constant,
                                            double value = 0.0)
 {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
@@ -194,7 +193,7 @@ inline std::shared_ptr<Node> ZeroPad(const std::array<std::array<DimSize_t, 2>,
                                            const std::string& name = "")
 {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
-    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadParamType::Constant, 0.0), name);
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
     return pad;
 }
 
@@ -207,7 +206,7 @@ inline std::shared_ptr<Node> ZeroPad(const std::array<DimSize_t, DIM> &dimBeginE
     for (size_t i = 0; i < DIM; ++i) {
         beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
     }
-    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadParamType::Constant, 0.0), name);
+    auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
     return pad;
 }
 
@@ -215,7 +214,7 @@ template <DimSize_t DIM>
 inline std::shared_ptr<Node> Pad(
     std::array<DimSize_t, 2> const (&beginEndTuples)[DIM],
     const std::string& name = "",
-    const PadParamType &type = PadParamType::Constant,
+    const PadBorderType &type = PadBorderType::Constant,
     double value = 0.0)
 {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
@@ -225,10 +224,10 @@ inline std::shared_ptr<Node> Pad(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::PadParam>::data[] = {"BeginEndTuples", "Type", "Value"};
+const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndTuples", "Type", "Value"};
 
 template <>
-const char *const EnumStrings<Aidge::PadParamType>::data[] = {"Constant", "Replicate", "Reflect", "Wrap"};
+const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Replicate", "Reflect", "Wrap"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
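
PadParamType is renamed to the more descriptive PadBorderType everywhere, factory helpers included. A minimal 2-D sketch of the renamed enum in use:

    #include <array>
    #include <memory>
    #include "aidge/operator/Pad.hpp"

    std::shared_ptr<Aidge::Node> padSketch() {
        // One element of padding before and after each of the 2 spatial dims.
        std::array<std::array<Aidge::DimSize_t, 2>, 2> borders{{{1, 1}, {1, 1}}};
        return Aidge::Pad<2>(borders, "pad1", Aidge::PadBorderType::Reflect);
    }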
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index acdc69b69ab86b25a11d889980b9236e41928316..593192c9f402e2646ac94cff68aa0c805f5aecd1 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -19,7 +19,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
@@ -29,15 +29,14 @@ class Producer_Op
       public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
                                           const Producer_Op &)> {
 private:
-    std::shared_ptr<Tensor> mOutput;
+    std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
     static constexpr const char* Type = "Producer";
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims)
-        : Operator(Type),
-          mOutput(std::make_shared<Tensor>())
+        : Operator(Type)
     {
         //ctor
         setDatatype(DataType::Float32);
@@ -51,10 +50,41 @@ public:
         setDatatype(tensor->dataType());
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Producer_Op(const Producer_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Producer_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Producer_Op>(*this);
+    }
+
     void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
         assert(false && "Producer operator takes no input");
     }
 
+    /**
+     * @brief Set the Output Tensor of the Producer operator.
+     * This method will create a copy of the Tensor.
+     *
+     * @param newOutput Tensor containing the values to copy 
+     */
+    void setOutputTensor(const Tensor& newOutput) {
+        *mOutput = newOutput;
+    }
+
     void computeOutputDims() override final {}
 
     bool outputDimsForwarded() const override final {return true;}
@@ -143,4 +173,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
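
Producer_Op now default-initializes its output tensor and gains setOutputTensor(), which copy-assigns the given tensor into the existing output rather than sharing it. A minimal sketch:

    #include <array>
    #include "aidge/operator/Producer.hpp"

    void producerSketch(const Aidge::Tensor& newValues) {
        Aidge::Producer_Op prod(std::array<Aidge::DimSize_t, 2>{3, 3});

        // Copies newValues into the producer's output tensor; the producer
        // keeps no reference to the argument.
        prod.setOutputTensor(newValues);
    }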
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 141bd3ae12c7875a90d2549a24e5c141f3ff6aba..433e353f05f8b4ffc3cfc0e047464e7f9257da02 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -42,6 +42,27 @@ public:
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ReLU_Op(const ReLU_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ReLU_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ReLU_Op>(*this);
+    }
+
     void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
         (void) inputIdx; // avoid unused warning
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0ea6ba39b3e4def2011ae5c7b2b9c348df5e2929
--- /dev/null
+++ b/include/aidge/operator/Scaling.hpp
@@ -0,0 +1,162 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SCALING_H_
+#define AIDGE_CORE_OPERATOR_SCALING_H_
+
+#include <vector>
+#include <memory>
+
+
+
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ScalingAttr {
+    scalingFactor
+};
+
+class Scaling_Op : public Operator,
+    public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
+    public StaticAttributes<ScalingAttr, float> {
+public:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char* Type = "Scaling";
+
+    Scaling_Op() = delete;
+
+    using Attributes_ = StaticAttributes<ScalingAttr, float>;
+    template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
+
+    Scaling_Op(float scalingFactor)
+            : Operator(Type),
+            Attributes_(
+                attr<ScalingAttr::scalingFactor>(scalingFactor))
+    {
+        setDatatype(DataType::Float32);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Scaling_Op(const Scaling_Op& op)
+        : Operator(Type),
+          Attributes_(op),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Scaling_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Scaling_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
+        (void) inputIdx; //avoid unused warning
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    void computeOutputDims() override final {
+        if (!mInput->empty())
+            mOutput->resize(mInput->dims());
+    }
+
+    bool outputDimsForwarded() const override final {
+        return !(mOutput->empty());
+    }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return *(mOutput.get());
+    }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert((inputIdx == 0) && "Scaling Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert((outputIdx == 0) && "Scaling Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    void setBackend(const std::string& name) {
+        mImpl = Registrar<Scaling_Op>::create(name)(*this);
+        mOutput->setBackend(name);
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+    void setDatatype(const DataType& datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::ScalingAttr>::data[]
+    = {"scalingFactor"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
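
The new Scaling operator follows the same single-input/single-output template as ReLU and Softmax, with scalingFactor as its only attribute and output dims mirroring the input. Creating a node is one call to the factory above:

    #include <memory>
    #include "aidge/operator/Scaling.hpp"

    std::shared_ptr<Aidge::Node> scalingSketch() {
        return Aidge::Scaling(/*scalingFactor=*/0.5f, "rescale");
    }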
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 64e713b331bbbbf612ee5102ba0ea82fb108350e..898bae4c31bb2c41947523a86bfb9cd5c7b732b4 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -42,6 +42,27 @@ public:
         setDatatype(DataType::Float32);
     }
 
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Softmax_Op(const Softmax_Op& op)
+        : Operator(Type),
+          mOutput(std::make_shared<Tensor>(*op.mOutput))
+    {
+        // cpy-ctor
+        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Softmax_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Softmax_Op>(*this);
+    }
+
     void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
         (void) inputIdx; // avoid unused warning
diff --git a/include/aidge/recipies/LabelGraph.hpp b/include/aidge/recipies/LabelGraph.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9dd77e5e9f397260cf936cf77b15616c17ea33b8
--- /dev/null
+++ b/include/aidge/recipies/LabelGraph.hpp
@@ -0,0 +1,35 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_RECIPIES_LABELGRAPH_H_
+#define AIDGE_RECIPIES_LABELGRAPH_H_
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+
+namespace Aidge {
+NodePtr nodeLabel(NodePtr node);
+
+/**
+ * @brief Generate the graph for the pixel-wise labels corresponding to a data graph, taking into account the scaling changes (padding, stride, pooling...).
+ * @details Right now, the behavior is to replace the following operators:
+ * - Conv: MaxPooling
+ * - ConvDepthWise: MaxPooling
+ * - AvgPooling: MaxPooling
+ * - MaxPooling: MaxPooling
+ * - all others: identity (removed)
+ * @param graph Data graph
+ * @return Computation graph for the labels derived from the data graph
+ */
+std::shared_ptr<GraphView> labelGraph(std::shared_ptr<GraphView> graph);
+} // namespace Aidge
+
+#endif /* AIDGE_RECIPIES_LABELGRAPH_H_ */
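
Given the substitutions listed in the @details block, using the recipe is a single call on an existing view:

    #include <memory>
    #include "aidge/recipies/LabelGraph.hpp"

    std::shared_ptr<Aidge::GraphView> labelSketch(std::shared_ptr<Aidge::GraphView> dataGraph) {
        // Conv / ConvDepthWise / AvgPooling become MaxPooling and every other
        // node reduces to identity, so labels follow the same spatial
        // transformations as the data.
        return Aidge::labelGraph(dataGraph);
    }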
diff --git a/include/aidge/utils/Any.hpp b/include/aidge/utils/Any.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0e65710596d31920de60a35d600e7ae612ea2bc4
--- /dev/null
+++ b/include/aidge/utils/Any.hpp
@@ -0,0 +1,552 @@
+/**
+ * Origin: https://github.com/claudiofantacci/any
+ * 
+ * Implementation of N4562 std::experimental::any (merged into C++17 as std::any)
+ * for C++11 compilers.
+ *
+ * See also:
+ *   + http://en.cppreference.com/w/cpp/any
+ *   + http://en.cppreference.com/w/cpp/experimental/any
+ *   + http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4562.html#any
+ *   + https://cplusplus.github.io/LWG/lwg-active.html#2509
+ *
+ * Copyright (c) 2016 Denilson das Mercês Amorim
+ * Copyright (c) 2018 Claudio Fantacci
+ *
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE.md or copy at http://www.boost.org/LICENSE_1_0.txt)
+ */
+
+#ifndef AIDGE_CORE_UTILS_ANY_H_
+#define AIDGE_CORE_UTILS_ANY_H_
+
+#include <stdexcept>
+#include <typeinfo>
+#include <type_traits>
+#include <utility>
+
+
+namespace libany
+{
+
+class bad_any_cast : public std::bad_cast
+{
+public:
+    const char* what() const noexcept override
+    {
+        return "bad any_cast";
+    }
+};
+
+
+class any final
+{
+public:
+    /**
+     * Constructs an object of type any with an empty state.
+     */
+    any() :
+        vtable(nullptr)
+    { }
+
+
+    /**
+     * Constructs an object of type any with an equivalent state as other.
+     */
+    any(const any& rhs) :
+        vtable(rhs.vtable)
+    {
+        if(rhs.has_value())
+        {
+            rhs.vtable->copy(rhs.storage, this->storage);
+        }
+    }
+
+
+    /**
+     * Constructs an object of type any with a state equivalent to the original state of other.
+     * rhs is left in a valid but otherwise unspecified state.
+     */
+    any(any&& rhs) noexcept :
+        vtable(rhs.vtable)
+    {
+        if(rhs.has_value())
+        {
+            rhs.vtable->move(rhs.storage, this->storage);
+            rhs.vtable = nullptr;
+        }
+    }
+
+
+    /**
+     * Same effect as this->clear().
+     */
+    ~any()
+    {
+        this->reset();
+    }
+
+
+    /**
+     * Constructs an object of type any that contains an object of type T direct-initialized with std::forward<ValueType>(value).
+     * T shall satisfy the CopyConstructible requirements, otherwise the program is ill-formed.
+     * This is because an `any` may be copy constructed into another `any` at any time, so a copy should always be allowed.
+     */
+    template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
+    any(ValueType&& value)
+    {
+        static_assert(std::is_copy_constructible<typename std::decay<ValueType>::type>::value,
+                      "T shall satisfy the CopyConstructible requirements.");
+        this->construct(std::forward<ValueType>(value));
+    }
+
+
+    /**
+     * Has the same effect as any(rhs).swap(*this). No effects if an exception is thrown.
+     */
+    any& operator=(const any& rhs)
+    {
+        any(rhs).swap(*this);
+        return *this;
+    }
+
+
+    /**
+     * Has the same effect as any(std::move(rhs)).swap(*this).
+     * The state of *this is equivalent to the original state of rhs and rhs is left in a valid
+     * but otherwise unspecified state.
+     */
+    any& operator=(any&& rhs) noexcept
+    {
+        any(std::move(rhs)).swap(*this);
+        return *this;
+    }
+
+
+    /**
+     * Has the same effect as any(std::forward<ValueType>(value)).swap(*this). No effect if an exception is thrown.
+     * T shall satisfy the CopyConstructible requirements, otherwise the program is ill-formed.
+     * This is because an `any` may be copy constructed into another `any` at any time, so a copy should always be allowed.
+     */
+    template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
+    any& operator=(ValueType&& value)
+    {
+        static_assert(std::is_copy_constructible<typename std::decay<ValueType>::type>::value, "T shall satisfy the CopyConstructible requirements.");
+        any(std::forward<ValueType>(value)).swap(*this);
+        return *this;
+    }
+
+
+    /**
+     * If not empty, destroys the contained object.
+     */
+    void reset() noexcept
+    {
+        if(has_value())
+        {
+            this->vtable->destroy(storage);
+            this->vtable = nullptr;
+        }
+    }
+
+
+    /**
+     * Returns true if *this has no contained object, otherwise false.
+     */
+    bool has_value() const noexcept
+    {
+        return this->vtable != nullptr;
+    }
+
+
+    /**
+     * If *this has a contained object of type T, typeid(T); otherwise typeid(void).
+     */
+    const std::type_info& type() const noexcept
+    {
+        return has_value()? this->vtable->type() : typeid(void);
+    }
+
+
+    /**
+     * Exchange the states of *this and rhs.
+     */
+    void swap(any& other) noexcept
+    {
+        if(this->vtable != other.vtable)
+        {
+            any tmp(std::move(other));
+
+            other.vtable = this->vtable;
+            if(this->vtable != nullptr)
+                this->vtable->move(this->storage, other.storage);
+
+            this->vtable = tmp.vtable;
+            if(tmp.vtable != nullptr)
+            {
+                tmp.vtable->move(tmp.storage, this->storage);
+                tmp.vtable = nullptr;
+            }
+        }
+        else
+        {
+            if(this->vtable != nullptr)
+                this->vtable->swap(this->storage, other.storage);
+        }
+    }
+
+
+private:
+    union storage_union
+    {
+        using stack_storage_t = typename std::aligned_storage<2 * sizeof(void*), std::alignment_of<void*>::value>::type;
+
+        void* dynamic;
+
+        stack_storage_t stack;
+    };
+
+
+    /**
+     * Base VTable specification.
+     *
+     * Note: The caller is responsible for doing .vtable = nullptr after destructive operations
+     * such as destroy() and/or move().
+     */
+    struct vtable_type
+    {
+        /**
+         * The type of the object this vtable is for.
+         */
+        const std::type_info& (*type)() noexcept;
+
+
+        /**
+         * Destroys the object in the union.
+         * The state of the union after this call is unspecified, caller must ensure not to use src anymore.
+         */
+        void(*destroy)(storage_union&) noexcept;
+
+
+        /**
+         * Copies the **inner** content of the src union into the yet uninitialized dest union.
+         * As such, both inner objects will have the same state, but on separate memory locations.
+         */
+        void(*copy)(const storage_union& src, storage_union& dest);
+
+
+        /**
+         * Moves the storage from src to the yet uninitialized dest union.
+         * The state of src after this call is unspecified, caller must ensure not to use src anymore.
+         */
+        void(*move)(storage_union& src, storage_union& dest) noexcept;
+
+
+        /**
+         * Exchanges the storage between lhs and rhs.
+         */
+        void(*swap)(storage_union& lhs, storage_union& rhs) noexcept;
+    };
+
+
+    /**
+     * VTable for dynamically allocated storage.
+     */
+    template<typename T>
+    struct vtable_dynamic
+    {
+        static const std::type_info& type() noexcept
+        {
+            return typeid(T);
+        }
+
+
+        static void destroy(storage_union& storage) noexcept
+        {
+            delete reinterpret_cast<T*>(storage.dynamic);
+        }
+
+
+        static void copy(const storage_union& src, storage_union& dest)
+        {
+            dest.dynamic = new T(*reinterpret_cast<const T*>(src.dynamic));
+        }
+
+
+        static void move(storage_union& src, storage_union& dest) noexcept
+        {
+            dest.dynamic = src.dynamic;
+            src.dynamic = nullptr;
+        }
+
+
+        static void swap(storage_union& lhs, storage_union& rhs) noexcept
+        {
+            std::swap(lhs.dynamic, rhs.dynamic);
+        }
+    };
+
+
+    /**
+     * VTable for stack allocated storage.
+     */
+    template<typename T>
+    struct vtable_stack
+    {
+        static const std::type_info& type() noexcept
+        {
+            return typeid(T);
+        }
+
+
+        static void destroy(storage_union& storage) noexcept
+        {
+            reinterpret_cast<T*>(&storage.stack)->~T();
+        }
+
+
+        static void copy(const storage_union& src, storage_union& dest)
+        {
+            new (&dest.stack) T(reinterpret_cast<const T&>(src.stack));
+        }
+
+
+        static void move(storage_union& src, storage_union& dest) noexcept
+        {
+            /**
+             * One of the conditions for using vtable_stack is a nothrow move constructor,
+             * so this move constructor will never throw an exception.
+             */
+            new (&dest.stack) T(std::move(reinterpret_cast<T&>(src.stack)));
+            destroy(src);
+        }
+
+
+        static void swap(storage_union& lhs, storage_union& rhs) noexcept
+        {
+            storage_union tmp_storage;
+            move(rhs, tmp_storage);
+            move(lhs, rhs);
+            move(tmp_storage, lhs);
+        }
+    };
+
+
+    /**
+     * Whether the type T must be dynamically allocated or can be stored on the stack.
+     */
+    template<typename T>
+    struct requires_allocation :
+        std::integral_constant<bool, !(std::is_nothrow_move_constructible<T>::value // N4562 6.3/3 [any.class]
+                                       && sizeof(T) <= sizeof(storage_union::stack)
+                                       && std::alignment_of<T>::value <= std::alignment_of<storage_union::stack_storage_t>::value)>
+    { };
+
+
+    /**
+     * Returns the pointer to the vtable of the type T.
+     */
+    template<typename T>
+    static vtable_type* vtable_for_type()
+    {
+        using VTableType = typename std::conditional<requires_allocation<T>::value, vtable_dynamic<T>, vtable_stack<T>>::type;
+        static vtable_type table = { VTableType::type, VTableType::destroy, VTableType::copy, VTableType::move, VTableType::swap };
+        return &table;
+    }
+
+
+protected:
+    template<typename T>
+    friend const T* any_cast(const any* operand) noexcept;
+
+
+    template<typename T>
+    friend T* any_cast(any* operand) noexcept;
+
+
+    /**
+     * Same effect as is_same(this->type(), t);
+     */
+    bool is_typed(const std::type_info& t) const
+    {
+        return is_same(this->type(), t);
+    }
+
+
+    /**
+     * Checks if two type infos are the same.
+     * If ANY_IMPL_FAST_TYPE_INFO_COMPARE is defined, checks only the address of the
+     * type infos, otherwise does an actual comparison. Checking addresses is
+     * only a valid approach when there's no interaction with outside sources
+     * (other shared libraries and such).
+     */
+    static bool is_same(const std::type_info& a, const std::type_info& b)
+    {
+#ifdef ANY_IMPL_FAST_TYPE_INFO_COMPARE
+        return &a == &b;
+#else
+        return a == b;
+#endif
+    }
+
+
+    /**
+     * Casts (with no type_info checks) the storage pointer as const T*.
+     */
+    template<typename T>
+    const T* cast() const noexcept
+    {
+        return requires_allocation<typename std::decay<T>::type>::value ? reinterpret_cast<const T*>(storage.dynamic) : reinterpret_cast<const T*>(&storage.stack);
+    }
+
+
+    /**
+     * Casts (with no type_info checks) the storage pointer as T*.
+     */
+    template<typename T>
+    T* cast() noexcept
+    {
+        return requires_allocation<typename std::decay<T>::type>::value ? reinterpret_cast<T*>(storage.dynamic) : reinterpret_cast<T*>(&storage.stack);
+    }
+
+
+private:
+    storage_union storage; // On offset(0) so no padding for align
+
+    vtable_type* vtable;
+
+
+    template<typename ValueType, typename T>
+    typename std::enable_if<requires_allocation<T>::value>::type do_construct(ValueType&& value)
+    {
+        storage.dynamic = new T(std::forward<ValueType>(value));
+    }
+
+
+    template<typename ValueType, typename T>
+    typename std::enable_if<!requires_allocation<T>::value>::type do_construct(ValueType&& value)
+    {
+        new (&storage.stack) T(std::forward<ValueType>(value));
+    }
+
+
+    /**
+     * Chooses between stack and dynamic allocation for the type decay_t<ValueType>,
+     * assigns the correct vtable, and constructs the object on our storage.
+     */
+    template<typename ValueType>
+    void construct(ValueType&& value)
+    {
+        using T = typename std::decay<ValueType>::type;
+
+        this->vtable = vtable_for_type<T>();
+
+        do_construct<ValueType,T>(std::forward<ValueType>(value));
+    }
+};
+
+
+namespace detail
+{
+    template<typename ValueType>
+    inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::true_type)
+    {
+        return std::move(*p);
+    }
+
+
+    template<typename ValueType>
+    inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::false_type)
+    {
+        return *p;
+    }
+}
+
+
+/**
+ * Performs *any_cast<add_const_t<remove_reference_t<ValueType>>>(&operand), or throws bad_any_cast on failure.
+ */
+template<typename ValueType>
+inline ValueType any_cast(const any& operand)
+{
+    auto p = any_cast<typename std::add_const<typename std::remove_reference<ValueType>::type>::type>(&operand);
+    if(p == nullptr) throw bad_any_cast();
+    return *p;
+}
+
+
+/**
+ * Performs *any_cast<remove_reference_t<ValueType>>(&operand), or throws bad_any_cast on failure.
+ */
+template<typename ValueType>
+inline ValueType any_cast(any& operand)
+{
+    auto p = any_cast<typename std::remove_reference<ValueType>::type>(&operand);
+    if(p == nullptr) throw bad_any_cast();
+    return *p;
+}
+
+
+/**
+ * If ANY_IMPL_ANY_CAST_MOVEABLE is not defined, does as N4562 specifies:
+ *     Performs *any_cast<remove_reference_t<ValueType>>(&operand), or throws bad_any_cast on failure.
+ *
+ * If ANY_IMPL_ANY_CAST_MOVEABLE is defined, does as LWG Defect 2509 specifies [1]:
+ *     If ValueType is MoveConstructible and isn't an lvalue reference, performs
+ *     std::move(*any_cast<remove_reference_t<ValueType>>(&operand)), otherwise
+ *     *any_cast<remove_reference_t<ValueType>>(&operand).
+ *     Throws bad_any_cast on failure.
+ *
+ *     [1] https://cplusplus.github.io/LWG/lwg-active.html#2509
+ */
+template<typename ValueType>
+inline ValueType any_cast(any&& operand)
+{
+#ifdef ANY_IMPL_ANY_CAST_MOVEABLE
+    using can_move = std::integral_constant<bool, std::is_move_constructible<ValueType>::value && !std::is_lvalue_reference<ValueType>::value>;
+#else
+    using can_move = std::false_type;
+#endif
+
+    auto p = any_cast<typename std::remove_reference<ValueType>::type>(&operand);
+    if(p == nullptr) throw bad_any_cast();
+    return detail::any_cast_move_if_true<ValueType>(p, can_move());
+}
+
+
+/**
+ * If operand != nullptr && operand->type() == typeid(ValueType), a pointer to the object
+ * contained by operand, otherwise nullptr.
+ */
+template<typename T>
+inline const T* any_cast(const any* operand) noexcept
+{
+    if(operand == nullptr || !operand->is_typed(typeid(T)))
+        return nullptr;
+    else
+        return operand->cast<T>();
+}
+
+
+/**
+ * If operand != nullptr && operand->type() == typeid(ValueType), a pointer to the object
+ * contained by operand, otherwise nullptr.
+ */
+template<typename T>
+inline T* any_cast(any* operand) noexcept
+{
+    if(operand == nullptr || !operand->is_typed(typeid(T)))
+        return nullptr;
+    else
+        return operand->cast<T>();
+}
+
+
+inline void swap(any& lhs, any& rhs) noexcept
+{
+    lhs.swap(rhs);
+}
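+
+/*
+ * A minimal usage sketch (illustrative only) of the API above, assuming the
+ * enclosing namespace is libany as used by DynamicAttributes.hpp:
+ *
+ *     libany::any a = 42;
+ *     int i = libany::any_cast<int>(a);          // i == 42
+ *     int* p = libany::any_cast<int>(&a);        // non-null: types match
+ *     double* q = libany::any_cast<double>(&a);  // nullptr: type mismatch
+ *     libany::any_cast<double>(a);               // throws bad_any_cast
+ */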
+
+}
+
+#endif /* AIDGE_CORE_UTILS_ANY_H_ */
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d3444000191022b575adaf1430319479daa5d4fc
--- /dev/null
+++ b/include/aidge/utils/Attributes.hpp
@@ -0,0 +1,77 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_ATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_ATTRIBUTES_H_
+
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#endif
+#include <vector>
+#include <string>
+#include <set>
+
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
+namespace {
+// This is the type that will hold all the strings. Each enumerated type will
+// declare its own specialization.
+template <typename T> struct EnumStrings {
+    static const char* const data[];
+};
+}
+
+namespace Aidge {
+template<class T, std::size_t N>
+constexpr std::size_t size(T (&)[N]) { return N; }
+
+/* This abstract class makes it possible to avoid binding Attributes directly.
+*  Otherwise we would need to bind every possible template instantiation of Attributes.
+*  Every operator can access the methods of this class by inheriting from
+*  Attributes in the binding code.
+*/
+class Attributes {
+public:
+    /**
+     * @brief Check if the attribute exists.
+     * @param name Name of the attribute to check.
+     * @return bool True if the attribute exists, false otherwise.
+    */
+    virtual bool hasAttr(const std::string& name) const = 0;
+
+    /**
+     * @brief Get the (implementation defined) name of the type of an attribute, returned by std::type_info::name.
+     * @param name Name of the attribute.
+     * @return std::string Name of the type as returned by std::type_info::name.
+    */
+    virtual std::string getAttrType(const std::string& name) const = 0;
+
+    /**
+     * @brief Get the set of attribute names.
+     * @return std::set<std::string> Set of attribute names.
+    */
+    virtual std::set<std::string> getAttrsName() const = 0;
+
+#ifdef PYBIND
+    /* Bindable get function, which does not require any templating.
+    *  This is thanks to py::object, which allows the function to
+    *  be agnostic of its return type.
+    */
+    virtual py::object getAttrPy(const std::string& name) const = 0;
+#endif
+    virtual ~Attributes() {}
+};
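+
+/*
+ * A minimal usage sketch (illustrative only, with a hypothetical helper):
+ * code that only needs the generic interface can inspect any attribute
+ * holder without knowing its concrete (possibly templated) type:
+ *
+ *     void dumpAttrs(const Attributes& attrs) {
+ *         for (const std::string& name : attrs.getAttrsName()) {
+ *             // type name is implementation-defined (std::type_info::name)
+ *             const std::string type = attrs.getAttrType(name);
+ *         }
+ *     }
+ */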
+}
+
+#endif /* AIDGE_CORE_UTILS_ATTRIBUTES_H_ */
diff --git a/include/aidge/utils/CParameter.hpp b/include/aidge/utils/CParameter.hpp
deleted file mode 100644
index 0f4c74ab8bccb7bc134e035a5f12d31d51663e5d..0000000000000000000000000000000000000000
--- a/include/aidge/utils/CParameter.hpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPARAMETER_H_
-#define AIDGE_CPARAMETER_H_
-
-#include <assert.h>
-#include <map>
-#include <vector>
-
-namespace Aidge {
-
-///\todo store also a fix-sized code that indicates the type
-///\todo managing complex types or excluding non-trivial, non-aggregate types
-class CParameter
-{
-private:
-    template <typename T>
-    struct is_vector : std::false_type {};
-
-    template <typename T, typename Alloc>
-    struct is_vector<std::vector<T, Alloc>> : std::true_type {};
-
-public:
-    // not copyable, not movable
-    CParameter(CParameter const &) = delete;
-    CParameter(CParameter &&) = delete;
-    CParameter &operator=(CParameter const &) = delete;
-    CParameter &operator=(CParameter &&) = delete;
-    CParameter() : m_Params({}){};
-    ~CParameter() = default;
-
-    /**
-     * \brief Returning a parameter identified by its name
-     * \tparam T expected parameter type
-     * \param i_ParamName Parameter name
-     * \details assert if T is not the actual parameter type, if the parameter does not
-     *  exist or interna parameter position is invalid.
-     * \todo Returning a T const& ? But dangerous => the client may get an address within
-     *  param buffer that will get invalid after the CParam death.
-     * \note at() throws if the parameter does not exist, using find to test for parameter existance
-     */
-    template<class T> T Get(std::string const i_ParamName) const
-    {
-        assert(m_Params.find(i_ParamName) != m_Params.end());
-        assert(m_Types.find(i_ParamName) != m_Types.end());
-        assert(m_Params.at(i_ParamName) <= m_OffSet);
-        assert(typeid(T).name() == m_Types.at(i_ParamName));
-        return *reinterpret_cast<T *>(m_BeginBuffer + m_Params.at(i_ParamName));
-    }
-
-    ///\brief Add a parameter value, identified by its name
-    ///\tparam T expected parameter type
-    ///\param i_ParamName Parameter name
-    ///\param i_Value Parameter value
-    ///\todo Pass i_Value by ref if large or not trivial
-    ///\bug If parameter already exists, its value is changed but written in the
-    /// internal buffer in a new location (previous value is still in memory at its previous location)
-    template<class T> void Add(std::string const &i_ParamName, T const &i_Value)
-    {
-        m_Buffer.resize(m_Buffer.size() + (sizeof(T) / sizeof(uint8_t)));
-        m_BeginBuffer = m_Buffer.data(); // Update buffer ptr in case of memory reordering
-        *reinterpret_cast<T *>(m_BeginBuffer + m_OffSet)
-            = i_Value; // Black-magic used to add anytype into the vector
-        m_Params[i_ParamName] = m_OffSet; // Copy pointer offset
-        m_OffSet += sizeof(T); // Increment offset
-
-        m_Types[i_ParamName] = typeid(i_Value).name();
-    }
-
-
-    std::string getParamType(std::string const &i_ParamName){
-        return m_Types[i_ParamName];
-    }
-
-    std::vector<std::string> getParametersName(){
-        std::vector<std::string> parametersName;
-        for(auto const& it: m_Params)
-            parametersName.push_back(it.first);
-        return parametersName;
-    }
-
-private:
-    std::map<std::string, std::size_t> m_Params; // { Param name : offset }
-
-    ///\brief Map to check type error
-    /* Note : i tried this : `std::map<std::string, std::type_info const *> mTypes;`
-    but looks like the type_ingo object was destroyed.
-    I am not a hugde fan of storing a string and making string comparison.
-    Maybe we can use a custom enum type (or is there a standard solution ?)
-    */
-    std::map<std::string, std::string> m_Types;
-
-    ///\brief All parameters values concatenated in raw binary form.
-    std::vector<uint8_t> m_Buffer = {};
-
-    ///\brief Starting address of the buffer
-    uint8_t *m_BeginBuffer = m_Buffer.data();
-
-    ///\brief Offset, in number of uint8_t, of the next parameter to write
-    std::size_t m_OffSet = 0;
-
-};
-
-}
-
-#endif /* AIDGE_CPARAMETER_H_ */
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..60f586edf947cef0e139049814263a29b4d01e24
--- /dev/null
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -0,0 +1,221 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_
+
+#include <map>
+#include <vector>
+#include <type_traits>
+#include <typeinfo>
+#include <cassert>
+#include <string>
+
+#include "aidge/utils/Any.hpp"
+#include "aidge/utils/Attributes.hpp"
+
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/embed.h>
+
+namespace py = pybind11;
+#endif
+
+
+namespace Aidge {
+
+///\todo store also a fix-sized code that indicates the type
+///\todo managing complex types or excluding non-trivial, non-aggregate types
+class DynamicAttributes : public Attributes {
+public:
+    /**
+     * \brief Returning an Attribute identified by its name
+     * \tparam T expected Attribute type
+     * \param name Attribute name
+     * \details assert if T is not the actual Attribute type or if the Attribute does not
+     *  exist
+     * \note at() throws if the Attribute does not exist; use find() to test for Attribute existence
+     */
+    template<class T> T& getAttr(const std::string& name)
+    {
+#ifdef PYBIND
+        // If attribute does not exist in C++, it might have been created or modified in Python
+        auto it = mAttrs.find(name);
+        if (it == mAttrs.end()) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy != mAttrsPy.end()) {
+                // Insert the attribute back in C++
+                mAttrs.emplace(std::make_pair(name, libany::any(itPy->second.cast<T>())));
+            }
+        }
+#endif
+
+        return libany::any_cast<T&>(mAttrs.at(name));
+    }
+
+    template<class T> const T& getAttr(const std::string& name) const
+    {
+#ifdef PYBIND
+        // If attribute does not exist in C++, it might have been created or modified in Python
+        auto it = mAttrs.find(name);
+        if (it == mAttrs.end()) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy != mAttrsPy.end()) {
+                // Insert the attribute back in C++
+                mAttrs.emplace(std::make_pair(name, libany::any(itPy->second.cast<T>())));
+            }
+        }
+#endif
+
+        return libany::any_cast<const T&>(mAttrs.at(name));
+    }
+
+    ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template<class T> void addAttr(const std::string& name, const T& value)
+    {
+        const auto& res = mAttrs.emplace(std::make_pair(name, libany::any(value)));
+        assert(res.second && "attribute already exists");
+
+#ifdef PYBIND
+        // We cannot handle Python objects if the Python interpreter is not running
+        if (Py_IsInitialized()) {
+            // Keep a copy of the attribute as a py::object that is updated every time
+            mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+        }
+#endif
+    }
+
+    ///\brief Set an Attribute value, identified by its name. If it already exists, its value (and type, if different) is changed.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template<class T> void setAttr(const std::string& name, const T& value)
+    {
+        auto res = mAttrs.emplace(std::make_pair(name, libany::any(value)));
+        if (!res.second)
+            res.first->second = libany::any(value);
+
+#ifdef PYBIND
+        // We cannot handle Python objects if the Python interpreter is not running
+        if (Py_IsInitialized()) {
+            // Keep a copy of the attribute as a py::object that is updated every time
+            auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+            if (!resPy.second)
+                resPy.first->second = std::move(py::cast(value));
+        }
+#endif
+    }
+
+    void delAttr(const std::string& name) {
+        mAttrs.erase(name);
+#ifdef PYBIND
+        mAttrsPy.erase(name);
+#endif
+    }
+
+#ifdef PYBIND
+    void addAttrPy(const std::string& name, py::object&& value)
+    {
+        auto it = mAttrs.find(name);
+        assert(it == mAttrs.end() && "attribute already exists");
+
+        const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
+        assert(res.second && "attribute already exists");
+    }
+
+    void setAttrPy(const std::string& name, py::object&& value)
+    {
+        auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
+        if (!resPy.second)
+            resPy.first->second = std::move(value);
+
+        // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
+        mAttrs.erase(name);
+    }
+#endif
+
+    //////////////////////////////////////
+    ///     Generic Attributes API
+    //////////////////////////////////////
+    bool hasAttr(const std::string& name) const override final {
+#ifdef PYBIND
+        // Attributes might have been created in Python, so the second condition is necessary.
+        return (mAttrs.find(name) != mAttrs.end() || mAttrsPy.find(name) != mAttrsPy.end());
+#else
+        return (mAttrs.find(name) != mAttrs.end());
+#endif
+    }
+
+    std::string getAttrType(const std::string& name) const override final {
+        // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
+        // - C-style for C++ created attributes
+        // - Python-style for Python created attributes
+#ifdef PYBIND
+        // If attribute does not exist in C++, it might have been created in Python
+        auto it = mAttrs.find(name);
+        if (it == mAttrs.end()) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy != mAttrsPy.end()) {
+                return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
+            }
+        }
+#endif
+
+        return mAttrs.at(name).type().name();
+    }
+
+    std::set<std::string> getAttrsName() const override final {
+        std::set<std::string> attrsName;
+        for(auto const& it: mAttrs)
+            attrsName.insert(it.first);
+#ifdef PYBIND
+        // Attributes might have been created in Python
+        for(auto const& it: mAttrsPy)
+            attrsName.insert(it.first);
+#endif
+        return attrsName;
+    }
+
+#ifdef PYBIND
+    /**
+     * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a
+     * generic type caster for std::any is not feasible.
+     * The strategy here is to keep a copy of each attribute as a py::object that is updated every time.
+    */
+    py::object getAttrPy(const std::string& name) const {
+        return mAttrsPy.at(name);
+    };
+#endif
+
+private:
+#ifdef PYBIND
+    // Stores C++ attributes (copy) and Python-only attributes
+    // Code should be compiled with -fvisibility=hidden
+    // See https://pybind11.readthedocs.io/en/stable/faq.html:
+    // “‘SomeClass’ declared with greater visibility than the type of its 
+    // field ‘SomeClass::member’ [-Wattributes]”
+    // This map will only be populated if the Python interpreter is running
+    std::map<std::string, py::object> mAttrsPy;
+    // Stores C++ attributes only
+    // mutable because it may be updated in getAttr() from Python
+    mutable std::map<std::string, libany::any> mAttrs;
+#else
+    std::map<std::string, libany::any> mAttrs;
+#endif
+};
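+
+/*
+ * A minimal usage sketch (illustrative only) of the dynamic API above:
+ *
+ *     DynamicAttributes attrs;
+ *     attrs.addAttr<int>("stride", 2);           // create (asserts if it already exists)
+ *     attrs.setAttr<int>("stride", 3);           // update, or create if missing
+ *     const int s = attrs.getAttr<int>("stride");
+ *     attrs.hasAttr("stride");                   // true
+ *     attrs.delAttr("stride");
+ */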
+
+}
+
+#endif /* AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_ */
diff --git a/include/aidge/utils/Parameter.hpp b/include/aidge/utils/Parameter.hpp
deleted file mode 100644
index b0c6e35950187f17d991cfe5b2c9bd2b09f1e70f..0000000000000000000000000000000000000000
--- a/include/aidge/utils/Parameter.hpp
+++ /dev/null
@@ -1,197 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CORE_UTILS_PARAMETER_H_
-#define AIDGE_CORE_UTILS_PARAMETER_H_
-
-#ifdef PYBIND
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include <string> // Add this inclue to print error
-#endif
-#include <tuple>
-#include <cassert>
-#include <cstddef>
-
-#ifdef PYBIND
-namespace py = pybind11;
-#endif
-
-namespace {
-// This is the type that will hold all the strings. Each enumerate type will
-// declare its own specialization.
-template <typename T> struct EnumStrings {
-    static const char* const data[];
-};
-}
-
-namespace Aidge {
-template<class T, std::size_t N>
-constexpr std::size_t size(T (&)[N]) { return N; }
-
-#ifdef PYBIND
-/* This abstract class allows to avoid binding Parametrizable.
-*  Otherwise we would need to bind every template possible of Parametrizable.
-*  Every operators can access the methods of this class by inheriting from
-*  PyAbstractParametrizable in the binding code.
-*/
-class PyAbstractParametrizable{
-    public:
-        /* Bindable get function, does not recquire any templating.
-        *  This is thanks to py::object which allow the function to
-        *  be agnostic from its return type.
-        */
-        virtual py::object getPy(const char* /*name*/) = 0;
-};
-#endif
-
-template <class PARAM_ENUM, class ...T>
-class Parameterizable
-#ifdef PYBIND
-    : public PyAbstractParametrizable
-#endif
-    {
-public:
-    using Parameters = std::tuple<T...>;
-
-    // Helper class to pass to the constructor
-    template <PARAM_ENUM paramEnum>
-    class param {
-    public:
-        constexpr param(const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& v) : value(v) {}
-        const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type value;
-    };
-
-/*
-    // Direct tuple initialization
-    Parameterizable(T... params) : mParams({params...}) {
-
-    }
-*/
-
-    // Constructor for parameters initialization.
-    // Compile-time garantee that every parameter is initialized.
-    template <PARAM_ENUM ...paramEnum> // non-type parameter pack
-    constexpr Parameterizable(const param<paramEnum>&&... params) {
-        // Check number of params consistency
-        static_assert(sizeof...(params) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in constructor");
-        // static_assert(size(EnumStrings<PARAM_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in enum string");
-
-        // Check no duplicates
-        constexpr std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { paramEnum... };
-        static_assert(!hasDuplicates(pe), "duplicate parameter"); // requires C++14
-
-        // Init params with constructor arguments
-        const std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(get<paramEnum>() = params.value), paramEnum) ... };
-        (void)p; // avoid unused warning
-    }
-
-    // Compile-time access with enum
-    template <PARAM_ENUM paramEnum>
-    constexpr typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() {
-        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
-    }
-
-    template <PARAM_ENUM paramEnum>
-    constexpr const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() const {
-        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
-    }
-
-    // Runtime access with enum
-    template <typename R>
-    constexpr R& get(PARAM_ENUM paramEnum) {
-        return get<R>(static_cast<std::size_t>(paramEnum));
-    }
-
-    template <typename R>
-    constexpr const R& get(PARAM_ENUM paramEnum) const {
-        return get<R>(static_cast<std::size_t>(paramEnum));
-    }
-
-    // Runtime existance check with name
-    constexpr bool isParam(const char* name) const {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    // Runtime access with name
-    template <typename R>
-    constexpr R& get(const char* name) {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                return get<R>(i);
-            }
-        }
-
-        assert(false && "parameter not found");
-    }
-
-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE > 0), R&>::type get(std::size_t i) {
-        if (i == SIZE) {
-            if (std::is_same<R, typename std::tuple_element<SIZE,std::tuple<T...>>::type>::value) {
-                return reinterpret_cast<R&>(std::get<SIZE>(mParams));
-            }
-            else {
-                assert(false && "wrong parameter type");
-            }
-        }
-        else {
-            return get<R, SIZE-1>(i);
-        }
-    }
-
-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE <= 0), R&>::type get(std::size_t i) {
-        assert(false && "parameter not found");
-    }
-
-    constexpr const std::tuple<T...>& getParams() const {
-        return mParams;
-    }
-
-    #ifdef PYBIND
-    py::object getPy(const char* name){
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
-                // Normal accessor would not work has we convert the tuple to a py::object which can be anything
-                return py::detail::accessor_policies::tuple_item::get(py::cast(mParams), static_cast<py::size_t>(i));
-            }
-        }
-        throw py::value_error("Parameter : " + std::string(name) + " does not exist." );
-    };
-    #endif
-
-private:
-    template <typename V, std::size_t N>
-    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
-        for (std::size_t i = 1; i < N; i++) {
-            for (std::size_t j = 0; j < i; j++) {
-                if (array[i] == array[j]) {
-                    return true;
-                }
-            }
-        }
-
-        return false;
-    }
-
-    std::tuple<T...> mParams;
-};
-}
-
-#endif /* AIDGE_CORE_UTILS_PARAMETER_H_ */
diff --git a/include/aidge/utils/Recipies.hpp b/include/aidge/utils/Recipies.hpp
index 4cbf8fd284bef314dbe28b19ebdae05172467bad..894e56fae2e9c2f6bcf11e4e76a433f5c8058080 100644
--- a/include/aidge/utils/Recipies.hpp
+++ b/include/aidge/utils/Recipies.hpp
@@ -17,11 +17,54 @@
 
 namespace Aidge{
 
+// FUSE MATMUL + ADD -> FC
+
+/**
+ * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Nodes into a :cpp:function:`Aidge::FC` Node.
+ *
+ * @param nodes Strict set of Nodes to merge.
+ */
 void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
+/**
+ * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Nodes into a :cpp:function:`Aidge::FC` Node.
+ *
+ * @param graphView Graph view to run graph matching on, in order to apply transformations.
+ */
+void fuseMulAdd(std::shared_ptr<GraphView> graphView);
+
+
+// REMOVE FLATTEN + FC -> FC
+
+/**
+ * @brief Remove ``Flatten`` before an :cpp:function:`Aidge::FC` Node.
+ *
+ * @param nodes Strict set of Nodes to process.
+ */
 void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
+/**
+ * @brief Remove ``Flatten`` before an :cpp:function:`Aidge::FC` Node.
+ *
+ * @param graphView Graph view to run graph matching on, in order to apply transformations.
+ */
+void removeFlatten(std::shared_ptr<GraphView> graphView);
+ 
+// FUSE BN + FC || CONV -> FC || CONV
 
+/**
+ * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
+ * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
+ *
+ * @param nodes Strict set of Nodes to fuse.
+ */
+void fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes);
+/**
+ * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
+ * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
+ *
+ * @param graphView Graph view to run graph matching on, in order to apply transformations.
+ */
+void fuseBatchNorm(std::shared_ptr<GraphView> graphView);
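+
+/*
+ * A minimal usage sketch (illustrative only): each recipe can be applied
+ * directly on a whole graph view, for instance after importing a model:
+ *
+ *     std::shared_ptr<GraphView> g = ...; // previously built or imported
+ *     fuseMulAdd(g);
+ *     removeFlatten(g);
+ *     fuseBatchNorm(g);
+ */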
 
 }
 
-
-#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 95961b4a2380e944b277d9f9c69be77e87977de9..3b29c472b3a540c9ef3b8ed46520e3e718e8cbfb 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -34,6 +34,7 @@ public:
     static std::map<Key, std::function<Func>>& registry()
     {
         #ifdef PYBIND
+        #define _CRT_SECURE_NO_WARNINGS
         if (std::getenv("AIDGE_CORE_WITH_PYBIND")){
             std::string name = std::string("registrar_")+typeid(Registrable<DerivedClass, Key, Func>).name();
             static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fb800cffbcff5d4113961f8e62977417336f2cb8
--- /dev/null
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -0,0 +1,204 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
+
+#include <tuple>
+#include <cassert>
+#include <cstddef>
+#include <typeinfo>
+
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Utils.hpp"
+
+namespace Aidge {
+/**
+ * @brief This class is designed to handle static attributes (i.e. known at compile-time)
+ * with named accessors and minimal overhead: the name strings are not stored in each object
+ * instance, and attributes remain accessible without overhead at compile-time.
+*/
+template <class ATTRS_ENUM, class ...T>
+class StaticAttributes : public Attributes {
+public:
+    using Attrs = std::tuple<T...>;
+
+    // Helper class to pass to the constructor
+    template <ATTRS_ENUM attrsEnum>
+    class attr {
+    public:
+        constexpr attr(const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& v) : value(v) {}
+        const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type value;
+    };
+
+/*
+    // Direct tuple initialization
+    StaticAttributes(T... attrs) : mAttrs({attrs...}) {
+
+    }
+*/
+
+    // Constructor for Attributes initialization.
+    // Compile-time guarantee that every attribute is initialized.
+    template <ATTRS_ENUM ...attrsEnum> // non-type attribute pack
+    constexpr StaticAttributes(const attr<attrsEnum>&&... attrs) {
+        // Check number of attrs consistency
+        static_assert(sizeof...(attrs) == std::tuple_size<std::tuple<T...>>::value, "wrong number of attributes in constructor");
+        // static_assert(size(EnumStrings<ATTRS_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of attributes in enum string");
+
+        // Check no duplicates
+        constexpr std::array<ATTRS_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { attrsEnum... };
+        static_assert(!hasDuplicates(pe), "duplicate attribute"); // requires C++14
+
+        // Init attrs with constructor arguments
+        const std::array<ATTRS_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(getAttr<attrsEnum>() = attrs.value), attrsEnum) ... };
+        (void)p; // avoid unused warning
+    }
+
+    // Compile-time access with enum
+    template <ATTRS_ENUM attrsEnum>
+    constexpr typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& getAttr() {
+        return std::get<static_cast<std::size_t>(attrsEnum)>(mAttrs);
+    }
+
+    template <ATTRS_ENUM attrsEnum>
+    constexpr const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& getAttr() const {
+        return std::get<static_cast<std::size_t>(attrsEnum)>(mAttrs);
+    }
+
+    // Runtime access with enum
+    template <typename R>
+    constexpr R& getAttr(ATTRS_ENUM attrsEnum) {
+        return getAttr<R>(static_cast<std::size_t>(attrsEnum));
+    }
+
+    template <typename R>
+    constexpr const R& getAttr(ATTRS_ENUM attrsEnum) const {
+        return getAttr<R>(static_cast<std::size_t>(attrsEnum));
+    }
+
+    // Runtime access with name
+    template <typename R>
+    constexpr R& getAttr(const char* name) {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) {
+                return getAttr<R>(i);
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name);
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    constexpr typename std::enable_if<(SIZE > 0), R&>::type getAttr(std::size_t i) {
+        if (i == SIZE-1) {
+            if (std::is_same<R, typename std::tuple_element<SIZE-1,std::tuple<T...>>::type>::value) {
+                return reinterpret_cast<R&>(std::get<SIZE-1>(mAttrs));
+            }
+            else {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "wrong type for attribute with index %lu", i);
+            }
+        }
+        else {
+            return getAttr<R, SIZE-1>(i);
+        }
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    [[noreturn]] constexpr typename std::enable_if<(SIZE == 0), R&>::type getAttr(std::size_t /*i*/) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
+    }
+
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type getAttrType(std::size_t i) const {
+        if (i == SIZE-1) {
+            return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
+        }
+        else {
+            return getAttrType<SIZE-1>(i);
+        }
+    }
+
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    [[noreturn]] constexpr typename std::enable_if<(SIZE == 0), const std::type_info&>::type getAttrType(std::size_t /*i*/) const {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
+    }
+
+    constexpr const std::tuple<T...>& getStaticAttributes() const {
+        return mAttrs;
+    }
+
+    //////////////////////////////////////
+    ///     Generic Attributes API
+    //////////////////////////////////////
+    // Runtime existence check with name
+    constexpr bool hasAttr(const std::string& name) const override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    // Runtime type access with name
+    constexpr std::string getAttrType(const std::string& name) const override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return getAttrType(i).name();
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name.c_str());
+    }
+
+    std::set<std::string> getAttrsName() const override final {
+        std::set<std::string> attrsName;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
+        }
+        return attrsName;
+    }
+
+    #ifdef PYBIND
+    py::object getAttrPy(const std::string& name) const {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+                // Normal accessor would not work as we convert the tuple to a py::object which can be anything
+                return py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
+    };
+    #endif
+
+private:
+    template <typename V, std::size_t N>
+    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
+        for (std::size_t i = 1; i < N; i++) {
+            for (std::size_t j = 0; j < i; j++) {
+                if (array[i] == array[j]) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    std::tuple<T...> mAttrs;
+};
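+
+/*
+ * A minimal usage sketch (illustrative only, with a hypothetical attribute
+ * enum): an operator declares its enum, specializes EnumStrings for runtime
+ * name lookup, then instantiates StaticAttributes:
+ *
+ *     enum class MyAttr { Size, Factor };
+ *     // In an anonymous namespace:
+ *     //   template <> const char* const EnumStrings<Aidge::MyAttr>::data[]
+ *     //       = {"Size", "Factor"};
+ *
+ *     using MyAttrs = StaticAttributes<MyAttr, int, float>;
+ *     MyAttrs attrs(MyAttrs::attr<MyAttr::Size>(3),
+ *                   MyAttrs::attr<MyAttr::Factor>(0.5f));
+ *     attrs.getAttr<MyAttr::Size>();   // compile-time access, no name lookup
+ *     attrs.getAttr<float>("Factor");  // runtime access by name
+ */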
+}
+
+#endif /* AIDGE_CORE_UTILS_STATICATTRIBUTES_H_ */
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6387619546c66922e48cf95a8a56487d4b0d0641
--- /dev/null
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -0,0 +1,52 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_TENSOR_UTILS_H_
+#define AIDGE_CORE_UTILS_TENSOR_UTILS_H_
+#include <cmath>  // std::abs
+#include "aidge/data/Tensor.hpp"
+
+/**
+ * @brief Compare two :cpp:class:`Aidge::Tensor` element-wise. The comparison function is:
+ *
+ * |t1-t2| <= absolute + relative * |t2|
+ *
+ * Returns false if any pair of values fails this test,
+ * if the tensors do not have the same size,
+ * or if the datatypes of the two tensors differ.
+ * Raises an assertion error if the templated type does not correspond to the datatype of the tensors.
+ *
+ * @tparam T should correspond to the type of the tensors; it also defines the type of the absolute and relative errors
+ * @param t1 first :cpp:class:`Aidge::Tensor` to test
+ * @param t2 second :cpp:class:`Aidge::Tensor` to test
+ * @param relative relative difference allowed (should be between 0 and 1)
+ * @param absolute absolute error allowed (should be positive)
+ * @return true if both tensors are approximately equal and have the same datatype and size; false otherwise
+ */
+template <typename T>
+bool approxEq(Aidge::Tensor t1, Aidge::Tensor t2, float relative, float absolute){
+    assert(t1.dataType() == t2.dataType());
+    assert(t1.dataType() == Aidge::NativeType<T>::type);
+    assert(relative >= 0 && relative <= 1);
+    assert(absolute >= 0);
+
+    if (t1.size() != t2.size()){
+        return false;
+    }
+    for (std::size_t i = 0; i < t1.size(); ++i) {
+        if (static_cast<float>(std::abs(t1.get<T>(i) - t2.get<T>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T>(i)))))){
+            return false;
+        }
+    }
+    return true;
+}
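+
+/*
+ * A minimal usage sketch (illustrative only), assuming two Float32 tensors
+ * with the same backend were built elsewhere:
+ *
+ *     bool ok = approxEq<float>(t1, t2, 0.01f, 1.0e-6f); // 1% relative, 1e-6 absolute
+ */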
+
+#endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_ */
diff --git a/include/aidge/utils/Utils.hpp b/include/aidge/utils/Utils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..89dc25bee18a50fdb0dc45ecd204bad7fb8912df
--- /dev/null
+++ b/include/aidge/utils/Utils.hpp
@@ -0,0 +1,38 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef AIDGE_UTILS_H_
+#define AIDGE_UTILS_H_
+
+#include <cstdio>
+
+#ifdef NO_EXCEPTIONS
+#include <cstdlib> // std::abort
+#define AIDGE_THROW_OR_ABORT(ex, ...) \
+do { std::printf(__VA_ARGS__); std::abort(); } while (false)
+#else
+#include <stdexcept>
+#include <memory>
+#define AIDGE_THROW_OR_ABORT(ex, ...) \
+do { \
+    int n = 128; \
+    std::unique_ptr<char[]> formatted; \
+    formatted.reset(new char[n]); \
+    const int len = std::snprintf(formatted.get(), n, __VA_ARGS__); \
+    if (len >= n) { \
+        formatted.reset(new char[len + 1]); \
+        std::snprintf(formatted.get(), len + 1, __VA_ARGS__); \
+    }; \
+    throw ex(formatted.get()); \
+} while (false)
+#endif
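+
+/*
+ * A minimal usage sketch (illustrative only): the macro formats a printf-style
+ * message, then either throws the given exception type or, with NO_EXCEPTIONS
+ * defined, prints the message and aborts:
+ *
+ *     if (idx >= size)
+ *         AIDGE_THROW_OR_ABORT(std::out_of_range, "index %zu out of range", idx);
+ */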
+
+#endif //AIDGE_UTILS_H_
\ No newline at end of file
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index d6442723ecc79527e8eaa7d3e03a466c085dfa58..31470e0eb2c50b5386b64498f89419801b133d3a 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -26,10 +26,10 @@ namespace Aidge {
 
 template<typename T>
 void addCtor(py::class_<Tensor,
-                        std::shared_ptr<Tensor>, 
-                        Data, 
+                        std::shared_ptr<Tensor>,
+                        Data,
                         Registrable<Tensor,
-                                    std::tuple<std::string, DataType>, 
+                                    std::tuple<std::string, DataType>,
                                     std::unique_ptr<TensorImpl>(const Tensor&)>>& mTensor){
     mTensor.def(py::init([]( py::array_t<T, py::array::c_style | py::array::forcecast> b) {
         /* Request a buffer descriptor from Python */
@@ -46,24 +46,27 @@ void addCtor(py::class_<Tensor,
         }else{
             printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n");
         }
-        
+
         return newTensor;
-    }));
+    }))
+    .def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
+    .def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
+    ;
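+    // With the two __setitem__ overloads above, a tensor can be mutated from
+    // Python by flat index or by coordinates (illustrative Python sketch):
+    //     t[1] = 1.5
+    //     t[[0, 1]] = 2.5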
 }
 
 
 void init_Tensor(py::module& m){
     py::class_<Registrable<Tensor,
-                           std::tuple<std::string, DataType>, 
+                           std::tuple<std::string, DataType>,
                            std::unique_ptr<TensorImpl>(const Tensor&)>,
                std::shared_ptr<Registrable<Tensor,
-                                           std::tuple<std::string, DataType>, 
+                                           std::tuple<std::string, DataType>,
                                            std::unique_ptr<TensorImpl>(const Tensor&)>>>(m,"TensorRegistrable");
 
-    py::class_<Tensor, std::shared_ptr<Tensor>, 
-               Data, 
+    py::class_<Tensor, std::shared_ptr<Tensor>,
+               Data,
                Registrable<Tensor,
-                           std::tuple<std::string, DataType>, 
+                           std::tuple<std::string, DataType>,
                            std::unique_ptr<TensorImpl>(const Tensor&)>> pyClassTensor
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
@@ -74,6 +77,8 @@ void init_Tensor(py::module& m){
     .def("size", &Tensor::size)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) &Tensor::resize)
     .def("has_impl", &Tensor::hasImpl)
+    .def("get_coord", &Tensor::getCoord)
+    .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("__str__", [](Tensor& b) {
         return b.toString();
@@ -82,15 +87,27 @@ void init_Tensor(py::module& m){
         return b.size();
     })
     .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
-        // TODO : Should return error if backend not compatible with get
         if (idx >= b.size()) throw py::index_error();
         switch(b.dataType()){
             case DataType::Float64:
-                return py::cast(static_cast<double*>(b.getImpl()->rawPtr())[idx]);
+                return py::cast(b.get<double>(idx));
+            case DataType::Float32:
+                return py::cast(b.get<float>(idx));
+            case DataType::Int32:
+                return py::cast(b.get<int>(idx));
+            default:
+                return py::none();
+        }
+    })
+    .def("__getitem__", [](Tensor& b, std::vector<size_t> coordIdx)-> py::object {
+        if (b.getIdx(coordIdx) >= b.size()) throw py::index_error();
+        switch(b.dataType()){
+            case DataType::Float64:
+                return py::cast(b.get<double>(coordIdx));
             case DataType::Float32:
-                return py::cast(static_cast<float*>(b.getImpl()->rawPtr())[idx]);
+                return py::cast(b.get<float>(coordIdx));
             case DataType::Int32:
-                return py::cast(static_cast<int*>(b.getImpl()->rawPtr())[idx]);
+                return py::cast(b.get<int>(coordIdx));
             default:
                 return py::none();
         }
@@ -126,12 +143,12 @@ void init_Tensor(py::module& m){
         }
 
         return py::buffer_info(
-            tensorImpl->rawPtr(),                       /* Pointer to buffer */
-            tensorImpl->scalarSize(),                   /* Size of one scalar */
-            dataFormatDescriptor,                /* Python struct-style format descriptor */
-            b.nbDims(),                                 /* Number of dimensions */
-            dims,                                       /* Buffer dimensions */
-            strides                                     /* Strides (in bytes) for each index */
+            tensorImpl->rawPtr(),       /* Pointer to buffer */
+            tensorImpl->scalarSize(),   /* Size of one scalar */
+            dataFormatDescriptor,       /* Python struct-style format descriptor */
+            b.nbDims(),                 /* Number of dimensions */
+            dims,                       /* Buffer dimensions */
+            strides                     /* Strides (in bytes) for each index */
         );
     });
 
@@ -142,6 +159,6 @@ void init_Tensor(py::module& m){
 // #if SIZE_MAX != 0xFFFFFFFF
     addCtor<double>(pyClassTensor);
 // #endif
-    
+
 }
 }
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 3efcf7c5345bbc835aeaf6dcbc416769b8654439..ab8b4cf7b91d5eea2db5245a8c5122ab004b4766 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -12,7 +12,6 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Add.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 350a94a16cf07bf75e35d4bc2f9a4d93b7437264..5820e94c5cbd24150a4e81b0db34328ac35e1bf5 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -27,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, Attributes>(
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 70d9bce003033e1264ac39764271773fa84c760f..f43381fecc689a292e166c4da40ea0cb4842c9e6 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -14,7 +14,6 @@
 
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
@@ -22,7 +21,7 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, PyAbstractParametrizable>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 4764884e8101091470e6f5259a3b3cc85db26992..91ede7b6a289f3def2a9c8261ff04d2ab9836cdd 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -26,7 +25,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, Attributes>(
     m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 639fcd01e5021093d8ef72960c6a1b920a9e1578..446bcdcceb3ba805223fc22e6fc19a22dcf354ec 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -27,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, Attributes>(
     m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 3b4137c6f208f96d256c72300437cc978658b84f..4b9d61d082ebed4d426b41efa071d3943f83d231 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -12,7 +12,6 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/FC.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
@@ -21,7 +20,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, PyAbstractParametrizable>(m, "FC_Op", py::multiple_inheritance());
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FC_Op", py::multiple_inheritance());
 
   m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index bec59eaf2cecdc7f64d1da07580116c4b3334992..4cf4dae2234900722058d6555582c5b78900ab7d 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -11,6 +11,7 @@
 
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
+#include <pybind11/functional.h>
 #include <stdio.h>
 
 #include "aidge/backend/OperatorImpl.hpp"
@@ -20,46 +21,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator, DynamicAttributes>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
-    .def("get_parameter_type", &GenericOperator_Op::getParameterType)
-    .def("get_parameters_name", &GenericOperator_Op::getParametersName)
-    .def("add_parameter", &GenericOperator_Op::addParameter<bool>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<int>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<float>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::string>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<bool>>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<int>>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<float>>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<std::string>>)
-    .def("get_parameter", [](GenericOperator_Op& self, std::string key) -> py::object {
-        /*
-        This getParameter method returns the good python type without having to have
-        prior knowledge of the parameter type.
-        */
-        py::object res = py::none();
-        std::string paramType = self.getParameterType(key);
-        if(paramType == typeid(int).name())
-            res = py::cast(self.getParameter<int>(key));
-        else if(paramType == typeid(float).name())
-            res = py::cast(self.getParameter<float>(key));
-        else if(paramType == typeid(bool).name())
-            res = py::cast(self.getParameter<bool>(key));
-        else if(paramType == typeid(std::string).name())
-            res = py::cast(self.getParameter<std::string>(key));
-        else if(paramType == typeid(std::vector<bool>).name())
-            res = py::cast(self.getParameter<std::vector<bool>>(key));
-        else if(paramType == typeid(std::vector<int>).name())
-            res = py::cast(self.getParameter<std::vector<int>>(key));
-        else if(paramType == typeid(std::vector<float>).name())
-            res = py::cast(self.getParameter<std::vector<float>>(key));
-        else if(paramType == typeid(std::vector<std::string>).name())
-            res = py::cast(self.getParameter<std::vector<std::string>>(key));
-        else {
-            throw py::key_error("Failed to convert parameter type " + key + ", this issue may come from typeid function which gave an unknown key : [" + paramType + "]. Please open an issue asking to add the support for this key.");
-        }
-        return res;
-    });
+    .def_readonly_static("identity", &GenericOperator_Op::Identity)
+    .def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
+    .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
 
     m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nbDataIn"), py::arg("nbIn"), py::arg("nbOut"),
           py::arg("name") = "");
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index c062d93f5c40fe46336fe34f6d1664f24da07732..cae8a88bab7b59189dfbc6528cd653f1c97cb73a 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -13,13 +13,12 @@
 
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, PyAbstractParametrizable>(m, "LeakyReLU_Op", py::multiple_inheritance());
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLU_Op", py::multiple_inheritance());
 
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index b6ae27289fabe1fe4dbeea60704a61373bc850cf..2f738550041bcdb1ae809d68fa24fdf5a72e9164 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -11,8 +11,7 @@
 
 #include <pybind11/pybind11.h>
 
-#include "aidge/operator/Matmul.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/operator/MatMul.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
@@ -20,13 +19,13 @@
 namespace py = pybind11;
 namespace Aidge {
 
-void declare_Matmul(py::module &m) {
-  py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, PyAbstractParametrizable>(m, "Matmul_Op", py::multiple_inheritance());
+void declare_MatMul(py::module &m) {
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, Attributes>(m, "MatMul_Op", py::multiple_inheritance());
 
-  m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = "");
+  m.def("MatMul", &MatMul, py::arg("out_channels"), py::arg("name") = "");
 }
 
-void init_Matmul(py::module &m) {
-  declare_Matmul(m);
+void init_MatMul(py::module &m) {
+  declare_MatMul(m);
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 7b51a45e2d6361d32b41ff30652bd9bbf0f7a4e5..a930b496b49280629d71725cee79aea4d850358e 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -27,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, Attributes>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index ea9880800059e8993996e67138f89419c165fc4f..1c62cd0adf6b8712073ec0674754ce7c8c2014a5 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -13,7 +13,6 @@
 #include <pybind11/stl.h>
 
 #include "aidge/utils/Types.h"
-#include "aidge/utils/Parameter.hpp"
 // #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -26,18 +25,19 @@ template <DimIdx_t DIM>
 void declare_Producer(py::module &m) {
     // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
-    
+
 }
 
 
 void init_Producer(py::module &m) {
     py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Operator>(
-        m, 
-        "ProducerOp", 
+        m,
+        "ProducerOp",
         py::multiple_inheritance())
-    .def("dims", &Producer_Op::dims);
+    .def("dims", &Producer_Op::dims)
+    .def("set_output_tensor", &Producer_Op::setOutputTensor);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
-    
+
     declare_Producer<1>(m);
     declare_Producer<2>(m);
     declare_Producer<3>(m);
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 78418d51a5c410cb56bb8421fd7f3dc6ec6d32db..d1287c0a928ae2ad27a839cec1c3d3955da65538 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -17,7 +17,7 @@ namespace Aidge {
 void init_Data(py::module&);
 void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
-void init_Parameterizable(py::module&);
+void init_Attributes(py::module&);
 void init_Operator(py::module&);
 
 void init_Add(py::module&);
@@ -28,7 +28,7 @@ void init_ConvDepthWise(py::module&);
 void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
-void init_Matmul(py::module&);
+void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
@@ -46,7 +46,7 @@ void init_GRegex(py::module&);
 void init_Recipies(py::module&);
 
 void init_Scheduler(py::module&);
-
+void init_TensorUtils(py::module&);
 
 void set_python_flag(){
     // Set an env variable to know if we run with Python or C++
@@ -65,7 +65,7 @@ void init_Aidge(py::module& m){
     init_Connector(m);
 
     init_OperatorImpl(m);
-    init_Parameterizable(m);
+    init_Attributes(m);
     init_Operator(m);
     init_Add(m);
     init_AvgPooling(m);
@@ -75,7 +75,7 @@ void init_Aidge(py::module& m){
     init_FC(m);
     init_GenericOperator(m);
     init_LeakyReLU(m);
-    init_Matmul(m);
+    init_MatMul(m);
     init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
@@ -86,6 +86,7 @@ void init_Aidge(py::module& m){
     init_GRegex(m);
     init_Recipies(m);
     init_Scheduler(m);
+    init_TensorUtils(m);
 }
 
 PYBIND11_MODULE(aidge_core, m) {
diff --git a/python_binding/recipies/pybind_Recipies.cpp b/python_binding/recipies/pybind_Recipies.cpp
index b4147dcb4fb82dbfe9f5b4605604725c6945ece9..93c131ef7417135bfdbc657c5c809339430616ed 100644
--- a/python_binding/recipies/pybind_Recipies.cpp
+++ b/python_binding/recipies/pybind_Recipies.cpp
@@ -20,24 +20,51 @@ namespace py = pybind11;
 
 namespace Aidge {
 void init_Recipies(py::module &m) {
-  m.def("fuse_mul_add", &fuseMulAdd, py::arg("nodes"), R"mydelimiter(
-    Recipie to Fuse MatMul and Add operators into an `aidge.FC` operator.
-    
-    Parameters
-    ----------
+
+
+  m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
+    Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+
+    :param graph_view: Graph view on which to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
+  m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+    Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+
     :param nodes: The MatMul and Add nodes to fuse.
-    :type nodes: list of `aidge.node`
+    :type nodes: list of :py:class:`aidge_core.Node`
+    )mydelimiter");
+
+  m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
+    Recipe to remove a Flatten operator.
 
+    :param graph_view: Graph view on which to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
-  m.def("remove_flatten", &removeFlatten, py::arg("nodes"), R"mydelimiter(
+  m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
     Recipe to remove a Flatten operator.
-    
-    Parameters
-    ----------
+
     :param nodes: The Flatten operator to remove.
-    :type nodes: list of `aidge.node`
+    :type nodes: list of :py:class:`aidge_core.Node`
+    )mydelimiter");
+  m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+    Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
+    :param nodes: The MatMul and Add nodes to fuse.
+    :type nodes: list of :py:class:`aidge_core.Node`
+    )mydelimiter");
+
+  m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
+    Recipe to fuse a BatchNorm operator into the preceding Conv operator.
+
+    :param graph_view: Graph view on which to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
+  m.def("fuse_batchnorm", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseBatchNorm), py::arg("nodes"), R"mydelimiter(
+    Recipe to fuse a BatchNorm operator into the preceding Conv operator.
+
+    :param nodes: The Conv and BatchNorm nodes to fuse.
+    :type nodes: list of :py:class:`aidge_core.Node`
     )mydelimiter");
-  
 }
 } // namespace Aidge
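
Each recipe is now registered twice: once taking a `GraphView`, with the graph matching run internally, and once taking an already-matched node set. A hedged sketch of the `GraphView` call pattern from Python (graph construction is elided and the `GraphView` constructor binding is assumed):

```python
import aidge_core

g = aidge_core.GraphView("my_graph")  # assumed constructor binding
# ... populate g with MatMul/Add, Flatten/FC and Conv/BatchNorm nodes ...

# The GraphView overloads run the pattern matching internally:
aidge_core.fuse_mul_add(g)    # every MatMul -> Add pair becomes one FC
aidge_core.remove_flatten(g)  # every Flatten -> FC loses its Flatten
aidge_core.fuse_batchnorm(g)  # every Conv -> BatchNorm is folded into the Conv
```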
diff --git a/python_binding/utils/pybind_Parameter.cpp b/python_binding/utils/pybind_Parameter.cpp
index 358316ea00413813d6d482a8a4601e69af3aa992..2957876f31ad0781a36905cef3a5ae88934b6a8a 100644
--- a/python_binding/utils/pybind_Parameter.cpp
+++ b/python_binding/utils/pybind_Parameter.cpp
@@ -1,12 +1,36 @@
 #include <pybind11/pybind11.h>
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
-void init_Parameterizable(py::module& m){
-    py::class_<PyAbstractParametrizable, std::shared_ptr<PyAbstractParametrizable>>(m, "PyAbstractParametrizable")
-    .def("get", &PyAbstractParametrizable::getPy, py::arg("name"))
-    ;
+DynamicAttributes test_DynamicAttributes_binding() {
+    DynamicAttributes attrs;
+    attrs.addAttr<int>("a", 42);
+    attrs.addAttr<std::string>("b", "test");
+    attrs.addAttr<std::vector<bool>>("c", {true, false, true});
+    return attrs;
 }
+
+double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
+    return attrs.getAttr<double>("d");
+}
+
+void init_Attributes(py::module& m){
+    py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
+    .def("has_attr", &Attributes::hasAttr, py::arg("name"))
+    .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
+    .def("get_attrs_name", &Attributes::getAttrsName)
+    .def("get_attr", &Attributes::getAttrPy, py::arg("name"));
+
+    py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
+    .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
+    .def("set_attr", &DynamicAttributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));
+
+    m.def("test_DynamicAttributes_binding", &test_DynamicAttributes_binding);
+    m.def("test_DynamicAttributes_binding_check", &test_DynamicAttributes_binding_check, py::arg("attrs"));
+}
+
 }
 
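The two test helpers bound at the end give a quick way to exercise the new attribute API from the Python side. A short round-trip sketch, using only names defined in this file (storing a Python float as a C++ double is what `test_DynamicAttributes_binding_check` expects):

```python
import aidge_core

# Values set on the C++ side in test_DynamicAttributes_binding():
# "a" = 42, "b" = "test", "c" = [True, False, True]
attrs = aidge_core.test_DynamicAttributes_binding()

assert attrs.has_attr("a") and attrs.get_attr("a") == 42
assert attrs.get_attr("b") == "test"
assert attrs.get_attr("c") == [True, False, True]

# Add a float from Python and read it back through the C++ getter.
attrs.add_attr("d", 3.14)
assert aidge_core.test_DynamicAttributes_binding_check(attrs) == 3.14
```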
diff --git a/python_binding/utils/pybind_TensorUtils.cpp b/python_binding/utils/pybind_TensorUtils.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..78825a5f3b8d45f22f76c57bd780dc7019fbc123
--- /dev/null
+++ b/python_binding/utils/pybind_TensorUtils.cpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+
+#include "aidge/utils/TensorUtils.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+template<typename T>
+void addTensorUtilsFunction(py::module &m){
+    m.def("approx_eq",
+    & approxEq<T>,
+    py::arg("t1"),
+    py::arg("t2"),
+    py::arg("relative"),
+    py::arg("absolute"),
+    R"mydelimiter(
+        Compare two :cpp:class:`Aidge::Tensor` element-wise. The comparison rule is:
+            |t1 - t2| <= absolute + relative * |t2|
+
+        Returns False if any pair of values violates this bound,
+        if the two tensors do not have the same size,
+        or if their datatypes differ.
+        Raises an assertion error if the templated type does not match the datatype of the tensors.
+
+        :param t1: first tensor to test
+        :type t1: :py:class:`aidge_core.Tensor`
+        :param t2: second tensor to test
+        :type t2: :py:class:`aidge_core.Tensor`
+        :param relative: relative difference allowed (should be between 0 and 1)
+        :type relative: float
+        :param absolute: absolute error allowed (should be positive)
+        :type absolute: float
+        )mydelimiter");
+}
+
+void init_TensorUtils(py::module &m) {
+    addTensorUtilsFunction<float>(m);
+    addTensorUtilsFunction<double>(m);
+    addTensorUtilsFunction<int>(m);
+    addTensorUtilsFunction<long>(m);
+}
+} // namespace Aidge
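
For reference, a pure-numpy sketch of the documented comparison rule (numpy is only a test dependency here; this mirrors the docstring above and is not the binding itself):

```python
import numpy as np

def approx_eq_ref(t1, t2, relative, absolute):
    """Reference of the rule: all(|t1 - t2| <= absolute + relative * |t2|)."""
    t1, t2 = np.asarray(t1), np.asarray(t2)
    if t1.shape != t2.shape or t1.dtype != t2.dtype:
        return False
    return bool(np.all(np.abs(t1 - t2) <= absolute + relative * np.abs(t2)))

assert approx_eq_ref([1.0, 2.0], [1.0, 2.000001], relative=0.0, absolute=1e-5)
assert not approx_eq_ref([1.0], [2.0], relative=0.1, absolute=0.0)
```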
diff --git a/setup.py b/setup.py
index 0b0f66e9132d66cdb6385d7f8c6c69ae0cc5d0e3..16305afdfdfa5de2e328460d9e96c77eb96a9d98 100644
--- a/setup.py
+++ b/setup.py
@@ -62,11 +62,11 @@ class CMakeBuild(build_ext):
 
         os.chdir(str(build_temp))
 
-        # Impose to use the executable of the python 
+        # Force the use of the Python executable that launched setup.py
         # when CMake sets up PythonInterp
         param_py = "-DPYTHON_EXECUTABLE=" + sys.executable
-        
-        install_path = f"{build_temp}/install" if "AIDGE_INSTALL" not in os.environ else os.environ["AIDGE_INSTALL"]
+
+        install_path = os.path.join(sys.prefix, "lib", "libAidge") if "AIDGE_INSTALL" not in os.environ else os.environ["AIDGE_INSTALL"]
 
         self.spawn(['cmake', str(cwd), param_py, '-DTEST=OFF', f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}'])
         if not self.dry_run:
@@ -83,11 +83,11 @@ class CMakeBuild(build_ext):
             for file in files:
                 if file.endswith('.so') and (root != str(aidge_package.absolute())):
                     currentFile=os.path.join(root, file)
-                    shutil.copy(currentFile, str(aidge_package.absolute())) 
+                    shutil.copy(currentFile, str(aidge_package.absolute()))
 
         # Copy version.txt in aidge_package
         os.chdir(os.path.dirname(__file__))
-        shutil.copy("version.txt", str(aidge_package.absolute()))    
+        shutil.copy("version.txt", str(aidge_package.absolute()))
 
 
 if __name__ == '__main__':
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index dbb64177676e414b131aa00af898b8542024bad9..8f8f51c89bbcc380963f355f781e8fda940dcffc 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -33,13 +33,10 @@ Aidge::Connector Aidge::GraphView::operator()(
     (void)input; // avoid unused warning
   }
 
+  IOIndex_t inID = 0;
   for (const Connector &ctor : ctors) {
     assert((ctor.node() != nullptr) &&
            "Input Connector must be associated with a node");
-    (void)ctors; // avoid unused warning
-  }
-  IOIndex_t inID = 0;
-  for (const Connector &ctor : ctors) {
     ctor.node()->addChild(shared_from_this(), static_cast<std::size_t>(ctor.index()),
                           {inNode, inID++});
   }
@@ -189,7 +186,7 @@ void Aidge::GraphView::forwardDims() {
             {
               assert(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty());
             }
-            
+
         }
     }
     // Compute dimensions of every node
@@ -514,39 +511,47 @@ void Aidge::GraphView::link(std::string /*name1_inID*/,
   printf("Not implemented yet.\n");
 }
 
-void Aidge::GraphView::insert(Node & /*newNode*/, Node & /*inNode*/,
-                             std::initializer_list<Node> /*outNodes*/,
-                             IOIndex_t /*tensorIdx*/) {
-  printf("Not implemented yet.\n");
+void Aidge::GraphView::insertParent(NodePtr childNode,
+                  NodePtr newParentNode,
+                  IOIndex_t childInputTensorIdx,
+                  IOIndex_t newParentInputTensorIdx,
+                  IOIndex_t newParentOutputTensorIdx){
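+  // Rewires currentParent -> child into currentParent -> newParent -> child,
+  // then registers the new parent in this GraphView.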
+  NodePtr currentParentNode = childNode->getParent(childInputTensorIdx);
+  const IOIndex_t currentParentOutputTensorIdx = childNode->input(childInputTensorIdx).second;
+  // Remove child from current parent & current parent from child
+  currentParentNode->removeChild(childNode, currentParentOutputTensorIdx);
+
+  // Insert the new parent between the current parent and the child
+  currentParentNode->addChild(newParentNode,currentParentOutputTensorIdx, newParentInputTensorIdx);
+  newParentNode->addChild(childNode, newParentOutputTensorIdx, childInputTensorIdx);
+
+  add(newParentNode);
 }
 
+
 bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
   // TODO : only supports one input/output node for now
   assert(mNodes.size()>0 && "There must be at least one Node to replace");
 
   bool replacable;
-  std::shared_ptr<Node> previousInputNode;
-  std::shared_ptr<Node> newInputNode;
-  std::shared_ptr<Node> previousOutputNode;
+  std::shared_ptr<Node> previousInputNode = (*inputNodes().begin());
+  std::shared_ptr<Node> previousOutputNode = (*outputNodes().begin());
   std::shared_ptr<Node> newOutputNode;
-  
+
   auto gNew = std::make_shared<GraphView>();
   gNew->add(newNodes, false);
 
   if (newNodes.empty()) {
     replacable = (outputNodes().size() == 1) &&
-                      (inputNodes().size() == 1) &&
-                      ((*outputNodes().begin())->nbOutputs() == 1) &&
-                      ((*inputNodes().begin())->nbInputs() == 1);
-    previousOutputNode = (*outputNodes().begin());
-    previousInputNode = (*inputNodes().begin());
+                 (inputNodes().size() == 1) &&
+                 ((*outputNodes().begin())->nbOutputs() == 1) &&
+                 ((*inputNodes().begin())->nbDataInputs() == 1);
     newOutputNode = previousInputNode->input(0).first;
   } else {
-    replacable = ((outputNodes().size() == gNew->outputNodes().size()) &&
-                     (outputNodes().size() == 1));
-    previousOutputNode = (*outputNodes().begin());
     newOutputNode = (*gNew->outputNodes().begin());
-    replacable = replacable && (previousOutputNode->nbOutputs() == newOutputNode->nbOutputs());
+    replacable = (outputNodes().size() == gNew->outputNodes().size()) &&
+                 (outputNodes().size() == 1) &&
+                 (previousOutputNode->nbOutputs() == newOutputNode->nbOutputs());
   }
 
   if (replacable) {
@@ -665,4 +670,55 @@ void Aidge::GraphView::removeOutputNode(const std::string nodeName) {
       mOutputNodes.erase(val);
     }
   }
-}
\ No newline at end of file
+}
+
+std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*cloneNode)(NodePtr)) const {
+  std::shared_ptr<GraphView> newGraph = std::make_shared<GraphView>(mName);
+
+  // Map for old node -> new node correspondence
+  std::map<NodePtr, NodePtr> oldToNewNodes;
+
+  for (const std::shared_ptr<Node> &node_ptr : mNodes) {
+    oldToNewNodes[node_ptr] = cloneNode(node_ptr);
+  }
+
+  // For each node, convert old node -> new node connections
+  for (auto &oldToNewNode : oldToNewNodes) {
+    if (oldToNewNode.second == nullptr)
+      continue;  // deleted node
+
+    // Add new node to new GraphView
+    newGraph->add(oldToNewNode.second, false);
+
+    // Connect parent nodes. Nodes that were removed with cloneNode() are set to nullptr
+    size_t parentId = 0;
+    for (auto parent : oldToNewNode.first->inputs()) {
+      while (oldToNewNodes[parent.first] == nullptr) {
+        // Find next valid parent in line, going backward in the graph
+        assert(parent.first->nbDataInputs() <= 1 && "deleted nodes in GraphView::clone() cannot have multiple data inputs");
+        const auto& parents = parent.first->inputs();
+
+        if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
+          && oldToNewNodes.find(parents[0].first) != oldToNewNodes.end()) // parent is in the GraphView
+        {
+          parent = parents[0];
+        }
+        else {
+          break;
+        }
+      }
+
+      if (oldToNewNodes[parent.first]) {
+        oldToNewNodes[parent.first]->addChild(oldToNewNode.second, parent.second, parentId);
+      }
+
+      ++parentId;
+    }
+  }
+
+  // Update input and output nodes
+  newGraph->updateInputNodes();
+  newGraph->updateOutputNodes();
+
+  return newGraph;
+}
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index abf572831d8f0b5c2c5eb836ea46e05b8114da55..54fdac808642f3ae603e237737e265ba394fccbd 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -321,6 +321,26 @@ void Aidge::Node::resetConnections(bool includeLearnableParam) {
     }
 }
 
+  ///////////////////////////////////////////////////////
+  //        CLONE
+  ///////////////////////////////////////////////////////
+
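+// clone() deep-copies the operator; cloneSharedOperators() shares it;
+// cloneSharedProducers() shares only Producer operators (e.g. weights/biases).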
+Aidge::NodePtr Aidge::Node::cloneSharedOperators() const {
+    return std::make_shared<Node>(mOperator, mName);
+}
+
+Aidge::NodePtr Aidge::Node::cloneSharedProducers() const {
+    std::shared_ptr<Operator> op = (mOperator->type() == Producer_Op::Type)
+        ? mOperator
+        : mOperator->clone();
+
+    return std::make_shared<Node>(op, mName);
+}
+
+Aidge::NodePtr Aidge::Node::clone() const {
+    return std::make_shared<Node>(mOperator->clone(), mName);
+}
+
 /////////////////////////////////////////////////////////////////////////////////////////////
 // private
 
diff --git a/src/graphmatching/NodeRegex.cpp b/src/graphmatching/NodeRegex.cpp
index bbb116d1b12a31b491b26d2a64d04b416b61c6b7..9bf164f60255c17492e528b0f27dec8c53f74979 100644
--- a/src/graphmatching/NodeRegex.cpp
+++ b/src/graphmatching/NodeRegex.cpp
@@ -12,7 +12,7 @@
 #include "aidge/graphmatching/NodeRegex.hpp"
 
 
-// Verification done by the Parameter system
+// Verification done by the Attribute system
 
 
 // Version 1 - Only test the type of the node (no need for a lexer)
@@ -39,8 +39,8 @@ bool Aidge::NodeRegex::isA(std::string NodeType){
 /**bool NodeRegex::_is(string &Node_op){
     // Parsing the condition is done in the initialization of the NodeRegex
     
-    // assert parameters exist in the node with the parameter function isParam()
+    // assert attributes exist in the node with the attribute function hasAttr()
 
-    // get the parameters
+    // get the attributes
 
 }*/
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..192036651cfbe2df71139dd63ca3d71f07300964
--- /dev/null
+++ b/src/operator/GenericOperator.cpp
@@ -0,0 +1,17 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <vector>
+
+#include "aidge/operator/GenericOperator.hpp"
+
+const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity
+    = [](const std::vector<std::vector<size_t>>& inputsDims) { return inputsDims; };
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index b3896b12143488275b2a064819595c380da62844..09a17a428e1de91c0318f710e6f097573cf529a6 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -42,6 +42,14 @@ void Aidge::Operator::updateConsummerProducer(){
     mImpl->updateConsummerProducer();
 }
 
-void Aidge::Operator::forward() { mImpl->forward(); }
+void Aidge::Operator::runHooks() const {
+    for (auto& hook : mHooks) {
+        hook.second->call();
+    }
+}
+void Aidge::Operator::forward() {
+    mImpl->forward();
+    runHooks();
+}
 
 void Aidge::Operator::backward() { mImpl->backward(); }
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3a50ec3e7f83517267ef4ad04cb2c855f8f9df7e
--- /dev/null
+++ b/src/recipies/FuseBatchNorm.cpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <set>
+#include <cassert>
+#include <memory>
+#include <string>
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/Conv.hpp"
+
+#include "aidge/utils/Recipies.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+// Graph Regex
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
+using namespace Aidge;
+
+void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
+
+    assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
+
+    // Assert the nodes types are correct to be fused
+    std::shared_ptr<Node> conv;
+    std::shared_ptr<Node> batchnorm;
+    for (const auto& element : nodes) {
+        assert((element->type() == "Conv" || element->type() == "BatchNorm") && "Wrong type for the nodes to replace");
+        if (element->type() == "Conv"){
+            conv = element;
+        }
+        else if (element->type() == "BatchNorm") {
+            batchnorm = element;
+        }
+    }
+    // TODO : check if batchnorm is the only child of the Conv or FC
+    std::shared_ptr<Tensor> scale  = batchnorm->input(1).first->getOperator()->getOutput(batchnorm->input(1).second);
+    std::shared_ptr<Tensor> shift  = batchnorm->input(2).first->getOperator()->getOutput(batchnorm->input(2).second);
+    std::shared_ptr<Tensor> b_mean = batchnorm->input(3).first->getOperator()->getOutput(batchnorm->input(3).second);
+    std::shared_ptr<Tensor> b_var  = batchnorm->input(4).first->getOperator()->getOutput(batchnorm->input(4).second);
+
+
+    // TODO : Find a way to remove the template
+    const float epsilon = std::static_pointer_cast<BatchNorm_Op<2>>(batchnorm->getOperator())->getAttr<float>("Epsilon");
+    DimSize_t convOutDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("OutChannels");
+
+
+    assert(scale->size()  == convOutDims);
+    assert(shift->size()  == convOutDims);
+    assert(b_mean->size() == convOutDims);
+    assert(b_var->size()  == convOutDims);
+    assert(epsilon > 0.0);
+    // TODO : is there no no_bias attribute ?
+    float meanVariance = 0.0;
+    unsigned int count = 0;
+
+    for (std::size_t output = 0; output < convOutDims; ++output) {
+        // TODO : get suppose datatype is float ..
+        if (b_var->get<float>(output) > 1.0e-12) {
+            meanVariance += b_var->get<float>(output);
+            ++count;
+        }
+        else {
+            printf("Zero-variance: %s [%lu]\n", conv->name().c_str(), output);
+        }
+    }
+    if (count > 0)
+        meanVariance /= count;
+    else {
+        printf("variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+    }
+
+    const DimSize_t channelsSize = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
+
+    // TODO : suppose we have Conv2D ...
+    const std::array<DimSize_t, 2> kernelDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+
+    std::shared_ptr<Tensor> weight  = conv->input(1).first->getOperator()->getOutput(conv->input(1).second);
+    std::shared_ptr<Tensor> bias  = conv->input(2).first->getOperator()->getOutput(conv->input(2).second);
+
+    for (std::size_t output = 0; output < convOutDims; ++output) {
+        // Corrected for zero-variance issue:
+        // "A Quantization-Friendly Separable Convolution for MobileNets"
+        // https://arxiv.org/pdf/1803.08607.pdf
+        // to help post-training quantization
+        const float factor = scale->get<float>(output)
+            / std::sqrt(epsilon + ((b_var->get<float>(output) > 1.0e-12 || count == 0)
+                        ? b_var->get<float>(output) : meanVariance));
+        // Weights adjustments
+        for (std::size_t channel = 0; channel < channelsSize; ++channel) {
+            // TODO : assumes the kernel has exactly 2 dimensions
+            for(std::size_t k0 = 0; k0 < kernelDims[0]; ++ k0){
+                for(std::size_t k1 = 0; k1 < kernelDims[1]; ++ k1){
+                    std::vector<DimSize_t> currentIdx = {output, channel, k0, k1};
+                    // TODO : suppose weights are float
+                    float weightValue = weight->get<float>(currentIdx);
+                    weight->set<float>(currentIdx, weightValue*factor); // TODO : check that this updates the Conv weights in place
+                }
+            }
+        }
+
+        // TODO : if noBias==true is set, force biasValue to 0
+        float biasValue = bias->get<float>(output);
+
+        biasValue = shift->get<float>(output) + (biasValue - b_mean->get<float>(output)) * factor;
+
+        bias->set<float>(output, biasValue);
+
+    }
+    auto g = std::make_shared<GraphView>();
+    g->add(std::set<std::shared_ptr<Node>>({
+        batchnorm,
+        batchnorm->input(1).first,
+        batchnorm->input(2).first,
+        batchnorm->input(3).first,
+        batchnorm->input(4).first
+    }));
+    g->replaceWith({});
+
+}
+
+void Aidge::fuseBatchNorm(std::shared_ptr<GraphView> graphView){
+    std::map<std::string,NodeRegex*> nodesRegex ;
+    nodesRegex["BatchNorm"] = new NodeRegex("BatchNorm");
+    nodesRegex["Conv"] = new NodeRegex("Conv");
+    nodesRegex["FC"] = new NodeRegex("FC");
+
+
+    std::vector<std::string> seqRegex;
+    seqRegex.push_back("Conv -> BatchNorm;"); // TODO: Add (Conv | FC)
+    GRegex GReg(nodesRegex, seqRegex);
+    Match matches = GReg.match(graphView);
+    std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
+    for (size_t i = 0; i < matches.getNbMatch(); ++i) {
+        fuseBatchNorm(matchNodes[i]);
+    }
+}
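
The per-output-channel arithmetic above amounts to the classic BatchNorm folding. A numpy sketch of the same fold, ignoring the zero-variance fallback for brevity (all names are illustrative, not part of the patch):

```python
import numpy as np

def fold_batchnorm(weight, bias, scale, shift, mean, var, eps):
    """Fold y = scale * (conv(x) - mean) / sqrt(var + eps) + shift into the Conv.

    weight: (out_ch, in_ch, kH, kW); bias, scale, shift, mean, var: (out_ch,)
    """
    factor = scale / np.sqrt(eps + var)            # one factor per output channel
    weight = weight * factor[:, None, None, None]  # scale every kernel element
    bias = shift + (bias - mean) * factor          # shift and rescale the bias
    return weight, bias

# Smoke test: identity statistics leave the Conv unchanged.
w, b = np.random.rand(4, 3, 3, 3), np.random.rand(4)
w2, b2 = fold_batchnorm(w, b, np.ones(4), np.zeros(4), np.zeros(4),
                        np.ones(4) - 1e-7, 1e-7)
assert np.allclose(w, w2) and np.allclose(b, b2)
```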
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index 561d25776a28f1aad8f8c943711887ec6661a10c..1de79890f9b597c4baff7427e01d7217f9695a44 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -20,21 +20,18 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-
+// Graph Regex
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
 using namespace Aidge;
 
-/**
- * @brief Merge MatMul and Add Node into FC.
- * 
- * @param nodes Strict set of Node to merge.
- */
 void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
     // Fuse MatMul & Add into FC
     // Inputs : old nodes (pointers on mul & add)
-    
+
     assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
     // Too bad we lose the type information after matching; how can we keep it (and not only the type)?
-    
+
     // Step 0 : Assert the nodes types are correct to be fused
     std::shared_ptr<Node> add;
     std::shared_ptr<Node> matmul;
@@ -53,7 +50,7 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
     auto producer_add_bias = add->input(1);
     Tensor& bias_tensor = (producer_add_bias.first)->getOperator()->output(0);
 
-    // Instanciate FC  
+    // Instantiate FC
     //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
     std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias_tensor.dims()[0], false));
 
@@ -61,10 +58,12 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
     // link weights & bias
     if (matmul->getParent(1)==nullptr) {
         matmul->getParent(0)->addChild(fc, 0, 1);
+        printf("MatMul out[1] == nullptr !\n");
     } else {
+        printf("MatMul out[1] != nullptr !\n");
         if (matmul->getParent(0)!=nullptr)
             matmul->getParent(0)->addChild(fc, 0, 0);
-        matmul->getParent(1)->addChild(fc, 0, 1);
+        matmul->input(1).first->addChild(fc, 0, 1);
     }
     (producer_add_bias.first)->addChild(fc,0,2);
 
@@ -74,7 +73,22 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
         // Maybe create a central mechanism to update automatically all graph views rather than each node have graphview presence memory ?
     auto nodeToReplace = std::make_shared<GraphView>();
-    nodeToReplace->add(nodes);
+    nodeToReplace->add(nodes, false);
     nodeToReplace->replaceWith({fc});
 
-}
\ No newline at end of file
+}
+
+void Aidge::fuseMulAdd(std::shared_ptr<GraphView> graphView){
+
+    std::map<std::string,NodeRegex*> nodesRegex ;
+    nodesRegex["MatMul"] = new NodeRegex("MatMul");
+    nodesRegex["Add"] = new NodeRegex("Add");
+    std::vector<std::string> seqRegex;
+    seqRegex.push_back("MatMul -> Add;");
+    GRegex GReg(nodesRegex, seqRegex);
+    Match matches = GReg.match(graphView);
+    std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
+    for (size_t i = 0; i < matches.getNbMatch(); ++i) {
+        fuseMulAdd(matchNodes[i]);
+    }
+}
diff --git a/src/recipies/LabelGraph.cpp b/src/recipies/LabelGraph.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..369336f7981198f962d8ab949309005be9ac5eb9
--- /dev/null
+++ b/src/recipies/LabelGraph.cpp
@@ -0,0 +1,56 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/recipies/LabelGraph.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+
+Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
+    // Conv => MaxPooling
+    if (node->type() == Conv_Op<2>::Type) {
+        auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
+
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvAttr::KernelDims>(), op->getAttr<ConvAttr::StrideDims>());
+        return std::make_shared<Node>(newOp, node->name());
+    }
+
+    // ConvDepthWise => MaxPooling
+    if (node->type() == ConvDepthWise_Op<2>::Type) {
+        auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
+
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvDepthWiseAttr::KernelDims>(), op->getAttr<ConvDepthWiseAttr::StrideDims>());
+        return std::make_shared<Node>(newOp, node->name());
+    }
+
+    // AvgPooling => MaxPooling
+    if (node->type() == AvgPooling_Op<2>::Type) {
+        auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
+
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<AvgPoolingAttr::KernelDims>(), op->getAttr<AvgPoolingAttr::StrideDims>());
+        return std::make_shared<Node>(newOp, node->name());
+    }
+
+    // MaxPooling => MaxPooling
+    if (node->type() == MaxPooling_Op<2>::Type) {
+        return node->clone();
+    }
+
+    // By default, remove the node from the graph
+    return nullptr;
+}
+
+std::shared_ptr<Aidge::GraphView> Aidge::labelGraph(std::shared_ptr<GraphView> graph) {
+    return graph->cloneCallback(&nodeLabel);
+}
diff --git a/src/recipies/RemoveFlatten.cpp b/src/recipies/RemoveFlatten.cpp
index cc3c3324e40636a1edcbc73cdc4a9dcfeec8a026..9096c107ba505f5f18993a761273552408db721b 100644
--- a/src/recipies/RemoveFlatten.cpp
+++ b/src/recipies/RemoveFlatten.cpp
@@ -15,10 +15,38 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/utils/Recipies.hpp"
 
+// Graph Regex
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
+
+
 namespace Aidge {
     void removeFlatten(std::set<std::shared_ptr<Node>> nodes) {
+        assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
+        std::shared_ptr<Node> flatten;
+        for (const auto& element : nodes) {
+            assert((element->type() == "FC" || element->type() == "Flatten") && "Wrong type for the nodes to replace");
+            if (element->type() == "Flatten"){
+                flatten = element;
+            }
+        }
         auto g = std::make_shared<GraphView>();
-        g->add(std::set<std::shared_ptr<Node>>({nodes}));
+        // TODO : avoid using replace_with and use a remove method instead
+        g->add(std::set<std::shared_ptr<Node>>({flatten}));
         g->replaceWith({});
     }
-}
\ No newline at end of file
+
+    void removeFlatten(std::shared_ptr<GraphView> graphView){
+        std::map<std::string,NodeRegex*> nodesRegex ;
+        nodesRegex["Flatten"] = new NodeRegex("Flatten");
+        nodesRegex["FC"] = new NodeRegex("FC");
+        std::vector<std::string> seqRegex;
+        seqRegex.push_back("Flatten->FC;");
+        GRegex GReg(nodesRegex, seqRegex);
+        Match matches = GReg.match(graphView);
+        std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
+        for (size_t i = 0; i < matches.getNbMatch(); ++i) {
+            removeFlatten(matchNodes[i]);
+        }
+    }
+}
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index bf46f161ce26936adc888791fe5823715099eacf..9392894148bfbe81558acc255cd133341e023998 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -34,6 +34,11 @@ void drawProgressBar(double progress, int barWidth, const std::string& additiona
 }
 
 void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
+    // TODO: for loop on the list of nodes to run
+    // run sequentially every runnable consumer once
+    // TODO: handle memory allocation in scheduler
+    // TODO: optimize memory usage
+
     // setup initial producers list
     mComputationNumber = 0;
     std::set<std::shared_ptr<Node>> producers;
@@ -74,16 +79,16 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
                        "\n\t\tR/C:\t",
                        (consumer->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(consumer.get()))).c_str());
                 for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) {
-                    printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
+                    printf("%zu/%zu\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
-                printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
                 printf("\n\t\tP:\t");
                 for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
-                    printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                    printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
-                printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
                 printf("\n");
             }
             bool isRunnable = true;
@@ -124,13 +129,13 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
                     printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
-                printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
                 printf("\n\t\tP:\t");
                 for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
-                    printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                    printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
-                printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
                 printf("\n");
             }
             bool isStillConsumer = false;
@@ -182,35 +187,20 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
     mStaticSchedule.clear();
 
     this->generateScheduling();
-
-    // TODO: For loop on the list of node to run
-    // run sequencially every runnable consumers once
-    // TODO: handle memory allocation in scheduler
-    // TODO: optimize memory usage
+    int cpt = 0;
     for (const auto& runnable : mStaticSchedule) {
-        bool computationOverForConsumer = true;
-        for (IOIndex_t parentIDi = 0; parentIDi < runnable->nbInputs(); ++parentIDi) {
-            if (runnable->getOperator()->getNbConsumedData(parentIDi) <
-                runnable->getOperator()->getNbRequiredData(parentIDi)) {
-                computationOverForConsumer = false;
-                break;
-            }
-        }
-        if (computationOverForConsumer) {
-            computationOver.insert(runnable);
-        }
-
         if (verbose)
             printf("run: %s\n",
                     (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
         else
-            drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(mComputationNumber), 50,
+            drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50,
                             (std::string("running ") + runnable->type() + "_" +
                                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
         const auto tStart = std::chrono::high_resolution_clock::now();
         runnable->forward();
         const auto tEnd = std::chrono::high_resolution_clock::now();
         mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+        cpt++;
     }
     if (!verbose) drawProgressBar(1.0, 50, "                                   ");
     printf("\n");
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index a37c9441723f019f3cae858578984a7c13b5929d..9f014364636c70031b522b09c893e1144af3f133 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -330,4 +330,276 @@ TEST_CASE("[core/graph] GraphView(replaceWith)") {
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({r1, r4}));
         REQUIRE((r1->output(0))[0].first == r4);
     }
-}
\ No newline at end of file
+}
+
+TEST_CASE("[GraphView] clone") {
+    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3");
+    auto g1 = std::make_shared<GraphView>("TestGraph");
+    dataProvider->addChild(conv1, 0);
+    g1->add(conv1);
+    g1->addChild(conv2, conv1, 0);
+    g1->addChild(conv3, conv2, 0);
+    g1->save("clone_g1");
+
+    SECTION("Check input-output connections") {
+        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getInput(1) == g1->getNode("conv1_w")->getOperator()->getOutput(0));
+        REQUIRE(conv1->getOperator()->getInput(2) == g1->getNode("conv1_b")->getOperator()->getOutput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
+        REQUIRE(conv2->getOperator()->getInput(1) == g1->getNode("conv2_w")->getOperator()->getOutput(0));
+        REQUIRE(conv2->getOperator()->getInput(2) == g1->getNode("conv2_b")->getOperator()->getOutput(0));
+        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
+        REQUIRE(conv3->getOperator()->getInput(1) == g1->getNode("conv3_w")->getOperator()->getOutput(0));
+        REQUIRE(conv3->getOperator()->getInput(2) == g1->getNode("conv3_b")->getOperator()->getOutput(0));
+    }
+
+    auto g2 = g1->clone();
+
+    auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider");
+    dataProvider2->addChild(g2->getNode("conv1"), 0);
+
+    g2->forwardDims();
+    g2->save("clone_g2");
+
+    SECTION("Check node cloning") {
+        REQUIRE(g1->getNode("conv1") != g2->getNode("conv1"));
+        REQUIRE(g1->getNode("conv1_w") != g2->getNode("conv1_w"));
+        REQUIRE(g1->getNode("conv1_b") != g2->getNode("conv1_b"));
+        REQUIRE(g1->getNode("conv2") != g2->getNode("conv2"));
+        REQUIRE(g1->getNode("conv2_w") != g2->getNode("conv2_w"));
+        REQUIRE(g1->getNode("conv2_b") != g2->getNode("conv2_b"));
+        REQUIRE(g1->getNode("conv3") != g2->getNode("conv3"));
+        REQUIRE(g1->getNode("conv3_w") != g2->getNode("conv3_w"));
+        REQUIRE(g1->getNode("conv3_b") != g2->getNode("conv3_b"));
+    }
+
+    SECTION("Check operator cloning") {
+        REQUIRE(g1->getNode("conv1")->getOperator() != g2->getNode("conv1")->getOperator());
+        REQUIRE(g1->getNode("conv1_w")->getOperator() != g2->getNode("conv1_w")->getOperator());
+        REQUIRE(g1->getNode("conv1_b")->getOperator() != g2->getNode("conv1_b")->getOperator());
+        REQUIRE(g1->getNode("conv2")->getOperator() != g2->getNode("conv2")->getOperator());
+        REQUIRE(g1->getNode("conv2_w")->getOperator() != g2->getNode("conv2_w")->getOperator());
+        REQUIRE(g1->getNode("conv2_b")->getOperator() != g2->getNode("conv2_b")->getOperator());
+        REQUIRE(g1->getNode("conv3")->getOperator() != g2->getNode("conv3")->getOperator());
+        REQUIRE(g1->getNode("conv3_w")->getOperator() != g2->getNode("conv3_w")->getOperator());
+        REQUIRE(g1->getNode("conv3_b")->getOperator() != g2->getNode("conv3_b")->getOperator());
+    }
+
+    SECTION("Check new connections") {
+        REQUIRE(dataProvider->getOperator()->getOutput(0) != g2->getNode("conv1")->getOperator()->getInput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(1) != g2->getNode("conv1_w")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(2) != g2->getNode("conv1_b")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getOutput(0) != g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(1) != g2->getNode("conv2_w")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(2) != g2->getNode("conv2_b")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getOutput(0) != g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(1) != g2->getNode("conv3_w")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(2) != g2->getNode("conv3_b")->getOperator()->getOutput(0));
+    }
+
+    SECTION("Check input-output connections") {
+        REQUIRE(dataProvider2->getOperator()->getOutput(0) == g2->getNode("conv1")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+    }
+}
+
+TEST_CASE("[GraphView] cloneSharedProducers") {
+    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3");
+    auto g1 = std::make_shared<GraphView>("TestGraph");
+    dataProvider->addChild(conv1, 0);
+    g1->add(conv1);
+    g1->addChild(conv2, conv1, 0);
+    g1->addChild(conv3, conv2, 0);
+    g1->save("cloneSharedProducers_g1");
+
+    SECTION("Check input-output connections") {
+        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getInput(1) == g1->getNode("conv1_w")->getOperator()->getOutput(0));
+        REQUIRE(conv1->getOperator()->getInput(2) == g1->getNode("conv1_b")->getOperator()->getOutput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
+        REQUIRE(conv2->getOperator()->getInput(1) == g1->getNode("conv2_w")->getOperator()->getOutput(0));
+        REQUIRE(conv2->getOperator()->getInput(2) == g1->getNode("conv2_b")->getOperator()->getOutput(0));
+        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
+        REQUIRE(conv3->getOperator()->getInput(1) == g1->getNode("conv3_w")->getOperator()->getOutput(0));
+        REQUIRE(conv3->getOperator()->getInput(2) == g1->getNode("conv3_b")->getOperator()->getOutput(0));
+    }
+
+    auto g2 = g1->cloneSharedProducers();
+
+    auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider");
+    dataProvider2->addChild(g2->getNode("conv1"), 0);
+
+    g2->forwardDims();
+    g2->save("cloneSharedProducers_g2");
+
+    SECTION("Check node cloning") {
+        REQUIRE(g1->getNode("conv1") != g2->getNode("conv1"));
+        REQUIRE(g1->getNode("conv1_w") != g2->getNode("conv1_w"));
+        REQUIRE(g1->getNode("conv1_b") != g2->getNode("conv1_b"));
+        REQUIRE(g1->getNode("conv2") != g2->getNode("conv2"));
+        REQUIRE(g1->getNode("conv2_w") != g2->getNode("conv2_w"));
+        REQUIRE(g1->getNode("conv2_b") != g2->getNode("conv2_b"));
+        REQUIRE(g1->getNode("conv3") != g2->getNode("conv3"));
+        REQUIRE(g1->getNode("conv3_w") != g2->getNode("conv3_w"));
+        REQUIRE(g1->getNode("conv3_b") != g2->getNode("conv3_b"));
+    }
+
+    SECTION("Check operator cloning") {
+        REQUIRE(g1->getNode("conv1")->getOperator() != g2->getNode("conv1")->getOperator());
+        REQUIRE(g1->getNode("conv1_w")->getOperator() == g2->getNode("conv1_w")->getOperator());
+        REQUIRE(g1->getNode("conv1_b")->getOperator() == g2->getNode("conv1_b")->getOperator());
+        REQUIRE(g1->getNode("conv2")->getOperator() != g2->getNode("conv2")->getOperator());
+        REQUIRE(g1->getNode("conv2_w")->getOperator() == g2->getNode("conv2_w")->getOperator());
+        REQUIRE(g1->getNode("conv2_b")->getOperator() == g2->getNode("conv2_b")->getOperator());
+        REQUIRE(g1->getNode("conv3")->getOperator() != g2->getNode("conv3")->getOperator());
+        REQUIRE(g1->getNode("conv3_w")->getOperator() == g2->getNode("conv3_w")->getOperator());
+        REQUIRE(g1->getNode("conv3_b")->getOperator() == g2->getNode("conv3_b")->getOperator());
+    }
+
+    SECTION("Check new connections") {
+        REQUIRE(dataProvider->getOperator()->getOutput(0) != g2->getNode("conv1")->getOperator()->getInput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv1")->getOperator()->getOutput(0) != g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv2")->getOperator()->getOutput(0) != g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
+        REQUIRE(g1->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+    }
+
+    SECTION("Check input-output connections") {
+        REQUIRE(dataProvider2->getOperator()->getOutput(0) == g2->getNode("conv1")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+    }
+}
+
+TEST_CASE("[GraphView] cloneSharedOperators") {
+    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3");
+    auto g1 = std::make_shared<GraphView>("TestGraph");
+    dataProvider->addChild(conv1, 0);
+    g1->add(conv1);
+    g1->addChild(conv2, conv1, 0);
+    g1->addChild(conv3, conv2, 0);
+    g1->save("cloneSharedOperators_g1");
+
+    SECTION("Check input-output connections") {
+        REQUIRE(dataProvider->getOperator()->getOutput(0) == conv1->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getInput(1) == g1->getNode("conv1_w")->getOperator()->getOutput(0));
+        REQUIRE(conv1->getOperator()->getInput(2) == g1->getNode("conv1_b")->getOperator()->getOutput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
+        REQUIRE(conv2->getOperator()->getInput(1) == g1->getNode("conv2_w")->getOperator()->getOutput(0));
+        REQUIRE(conv2->getOperator()->getInput(2) == g1->getNode("conv2_b")->getOperator()->getOutput(0));
+        REQUIRE(conv2->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
+        REQUIRE(conv3->getOperator()->getInput(1) == g1->getNode("conv3_w")->getOperator()->getOutput(0));
+        REQUIRE(conv3->getOperator()->getInput(2) == g1->getNode("conv3_b")->getOperator()->getOutput(0));
+    }
+
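+    // cloneSharedOperators() duplicates the Node objects but keeps the
+    // underlying Operator instances shared between g1 and g2, as the
+    // sections below check.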
+    auto g2 = g1->cloneSharedOperators();
+    g2->forwardDims();
+    g2->save("cloneSharedOperators_g2");
+
+    SECTION("Check node cloning") {
+        REQUIRE(g1->getNode("conv1") != g2->getNode("conv1"));
+        REQUIRE(g1->getNode("conv1_w") != g2->getNode("conv1_w"));
+        REQUIRE(g1->getNode("conv1_b") != g2->getNode("conv1_b"));
+        REQUIRE(g1->getNode("conv2") != g2->getNode("conv2"));
+        REQUIRE(g1->getNode("conv2_w") != g2->getNode("conv2_w"));
+        REQUIRE(g1->getNode("conv2_b") != g2->getNode("conv2_b"));
+        REQUIRE(g1->getNode("conv3") != g2->getNode("conv3"));
+        REQUIRE(g1->getNode("conv3_w") != g2->getNode("conv3_w"));
+        REQUIRE(g1->getNode("conv3_b") != g2->getNode("conv3_b"));
+    }
+
+    SECTION("Check operator cloning") {
+        REQUIRE(g1->getNode("conv1")->getOperator() == g2->getNode("conv1")->getOperator());
+        REQUIRE(g1->getNode("conv1_w")->getOperator() == g2->getNode("conv1_w")->getOperator());
+        REQUIRE(g1->getNode("conv1_b")->getOperator() == g2->getNode("conv1_b")->getOperator());
+        REQUIRE(g1->getNode("conv2")->getOperator() == g2->getNode("conv2")->getOperator());
+        REQUIRE(g1->getNode("conv2_w")->getOperator() == g2->getNode("conv2_w")->getOperator());
+        REQUIRE(g1->getNode("conv2_b")->getOperator() == g2->getNode("conv2_b")->getOperator());
+        REQUIRE(g1->getNode("conv3")->getOperator() == g2->getNode("conv3")->getOperator());
+        REQUIRE(g1->getNode("conv3_w")->getOperator() == g2->getNode("conv3_w")->getOperator());
+        REQUIRE(g1->getNode("conv3_b")->getOperator() == g2->getNode("conv3_b")->getOperator());
+    }
+
+    SECTION("Check input-output connections") {
+        REQUIRE(dataProvider->getOperator()->getOutput(0) == g2->getNode("conv1")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(1) == g2->getNode("conv1_w")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getInput(2) == g2->getNode("conv1_b")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(1) == g2->getNode("conv2_w")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getInput(2) == g2->getNode("conv2_b")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(1) == g2->getNode("conv3_w")->getOperator()->getOutput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getInput(2) == g2->getNode("conv3_b")->getOperator()->getOutput(0));
+    }
+}
+
+TEST_CASE("[core/graph] GraphView(insertParent)") {
+    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");
+    auto conv3 = Conv(32, 64, {1, 1}, "conv3");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    dataProvider->addChild(conv1, 0);
+    g->add(conv1);
+    g->addChild(conv2, conv1, 0);
+    g->addChild(conv3, conv1, 0);
+    g->save("graphForwardDims");
+    g->forwardDims();
+
+    auto newConv = Conv(32, 32, {1, 1}, "newConv");
+
+    SECTION("Check insertParent conv2 then insertParent conv3") {
+        g->insertParent(conv2, newConv, 0, 0, 0);
+
+        std::set<NodePtr> expectedConv1Children = {conv3, newConv};
+        std::set<NodePtr> expectedNewConvChildren = {conv2};
+
+        REQUIRE(conv1->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) == newConv->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) != conv2->getOperator()->getInput(0));
+        REQUIRE(newConv->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
+        REQUIRE((newConv->getChildren()) == expectedNewConvChildren);
+        REQUIRE((conv1->getChildren()) == expectedConv1Children);
+
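+        // Now reroute conv3 through newConv as well: conv1 feeds only
+        // newConv, which fans out to both conv2 and conv3.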
+        g->insertParent(conv3, newConv, 0, 0, 0);
+
+        std::set<NodePtr> expectedConv1Children2 = {newConv};
+        std::set<NodePtr> expectedNewConvChildren2 = {conv2, conv3};
+
+        REQUIRE(conv1->getOperator()->getOutput(0) != conv3->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) == newConv->getOperator()->getInput(0));
+        REQUIRE(conv1->getOperator()->getOutput(0) != conv2->getOperator()->getInput(0));
+        REQUIRE(newConv->getOperator()->getOutput(0) == conv2->getOperator()->getInput(0));
+        REQUIRE(newConv->getOperator()->getOutput(0) == conv3->getOperator()->getInput(0));
+        REQUIRE((newConv->getChildren()) == expectedNewConvChildren2);
+        REQUIRE((conv1->getChildren()) == expectedConv1Children2);
+
+    }
+}
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 886326214a4a285fb32e5909da5114d74782ee46..8d634cc3a105c423b54b6003f41204aeb1fc5335 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -17,72 +17,72 @@
 
 using namespace Aidge;
 
-TEST_CASE("[core/operators] GenericOp(add & get parameters)", "[Operator]") {
+TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        int value = 5;
-        const char* key = "intParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<int>(key) == value);
+        const char* key = "intAttr";
+        Testop.addAttr(key, int(5));
+        int registeredVal = Testop.getAttr<int>(key);
+        REQUIRE(registeredVal == 5);
     }
     SECTION("LONG") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         long value = 3;
-        const char* key = "longParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<long>(key) == value);
+        const char* key = "longAttr";
+        Testop.addAttr(key, value);
+        REQUIRE(Testop.getAttr<long>(key) == value);
     }
     SECTION("FLOAT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         float value = 2.0;
-        const char* key = "floatParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<float>(key) == value);
+        const char* key = "floatAttr";
+        Testop.addAttr(key, value);
+        REQUIRE(Testop.getAttr<float>(key) == value);
     }
      SECTION("VECTOR<BOOL>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<bool> value = {true, false, false, true, true};
         const char* key = "vect";
-        Testop.addParameter(key, value);
+        Testop.addAttr(key, value);
 
-        REQUIRE(Testop.getParameter<std::vector<bool>>(key).size() == value.size());
+        REQUIRE(Testop.getAttr<std::vector<bool>>(key).size() == value.size());
         for (std::size_t i=0; i < value.size(); ++i){
-            REQUIRE(Testop.getParameter<std::vector<bool>>(key)[i] == value[i]);
+            REQUIRE(Testop.getAttr<std::vector<bool>>(key)[i] == value[i]);
         }
     }
     SECTION("VECTOR<INT>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<int> value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
         const char* key = "vect";
-        Testop.addParameter(key, value);
+        Testop.addAttr(key, value);
 
-        REQUIRE(Testop.getParameter<std::vector<int>>(key).size() == value.size());
+        REQUIRE(Testop.getAttr<std::vector<int>>(key).size() == value.size());
         for (std::size_t i=0; i < value.size(); ++i){
-            REQUIRE(Testop.getParameter<std::vector<int>>(key)[i] == value[i]);
+            REQUIRE(Testop.getAttr<std::vector<int>>(key)[i] == value[i]);
         }
     }
     SECTION("MULTIPLE PARAMS") {
         /*
-        Goal : Test that the offsets are well done by adding different parameters with different size.
+        Goal: check that internal storage offsets are computed correctly when adding attributes of different sizes.
         */
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addParameter<long>("longParam", 3);
-        Testop.addParameter<float>("floatParam", 2.0);
-        Testop.addParameter<uint8_t>("uint8Param", 5);
-        Testop.addParameter<long long>("llParam", 10);
-        REQUIRE(Testop.getParameter<long>("longParam") == 3);
-        REQUIRE(Testop.getParameter<float>("floatParam") == 2.0);
-        REQUIRE(Testop.getParameter<uint8_t>("uint8Param") == 5);
-        REQUIRE(Testop.getParameter<long long>("llParam") == 10);
+        Testop.addAttr<long>("longAttr", 3);
+        Testop.addAttr<float>("floatAttr", 2.0);
+        Testop.addAttr<uint8_t>("uint8Attr", 5);
+        Testop.addAttr<long long>("llAttr", 10);
+        REQUIRE(Testop.getAttr<long>("longAttr") == 3);
+        REQUIRE(Testop.getAttr<float>("floatAttr") == 2.0);
+        REQUIRE(Testop.getAttr<uint8_t>("uint8Attr") == 5);
+        REQUIRE(Testop.getAttr<long long>("llAttr") == 10);
     }
 }
 
-TEST_CASE("[core/operator] GenericOp(type check)", "[.ass]") {
+TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
     SECTION("WRONG TYPE FOR GETTER") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addParameter<long>("longParam", 3);
+        Testop.addAttr<long>("longAttr", 3);
 
-        // This line should raise a failled assert
-        REQUIRE_THROWS(Testop.getParameter<int>("longParameter"));
+        // Requesting the attribute with the wrong type should throw
+        REQUIRE_THROWS(Testop.getAttr<int>("longAttr"));
     }
 }
diff --git a/unit_tests/recipies/Test_FuseMulAdd.cpp b/unit_tests/recipies/Test_FuseMulAdd.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..da53642055a3146c71a211ad7816f21c9b92d6cd
--- /dev/null
+++ b/unit_tests/recipies/Test_FuseMulAdd.cpp
@@ -0,0 +1,77 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+#include <set>
+
+// #include "aidge/backend/cpu/operator/AddImpl.hpp"
+// #include "aidge/backend/cpu/operator/ConvImpl.hpp"
+// #include "aidge/backend/cpu/operator/FCImpl.hpp"
+// #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Recipies.hpp"
+
+namespace Aidge {
+
+TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
+    // generate the original GraphView
+    auto matmul0 = MatMul(5, "matmul0");
+    auto add0 = Add<2>("add0");
+    auto matmul1 = MatMul(5, "matmul1");
+    auto add1 = Add<2>("add1");
+
+    auto b0 = Producer({5}, "B0");
+    auto w0 = Producer({5, 5}, "W0");
+    auto b1 = Producer({5}, "B1");
+    auto w1 = Producer({5, 5}, "W1");
+    auto input = Producer({2, 5}, "input");
+
+    input->addChild(matmul0, 0, 0);
+    w0->addChild(matmul0, 0, 1);
+
+    matmul0->addChild(add0, 0, 0);
+    b0->addChild(add0, 0, 1);
+
+    add0->addChild(matmul1, 0, 0);
+    w1->addChild(matmul1, 0, 1);
+
+    matmul1->addChild(add1, 0, 0);
+    b1->addChild(add1, 0, 1);
+
+    auto g = std::make_shared<GraphView>();
+    g->add({matmul0, add0, matmul1, add1, b0, b1});
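+    // w0 and w1 are not listed here: add() is expected to pull in the
+    // connected weight Producers, as the REQUIRE on getNodes() below checks.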
+
+    // Check original graph
+    REQUIRE(g->getNodes() ==
+            std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+    REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
+    REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
+    REQUIRE(((matmul1->getParent(0) == add0) && (matmul1->getParent(1) == w1)));
+    REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
+
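+    // fuseMulAdd() should replace each MatMul+Add pair with a single FC node,
+    // leaving the four Producers untouched: 6 nodes in total afterwards.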
+    // Transform the GraphView in place
+    fuseMulAdd(g);
+    g->save("FuseMulAdd_fused");
+
+    // Check the resulting GraphView
+    std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+    REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+    REQUIRE(newNodes.size() == 6);
+    for (const auto& node : newNodes) {
+        REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
+    }
+}
+}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_LabelGraph.cpp b/unit_tests/recipies/Test_LabelGraph.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..873ad68f3198c6b6adf44d8c7ae31e667c63a18d
--- /dev/null
+++ b/unit_tests/recipies/Test_LabelGraph.cpp
@@ -0,0 +1,154 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/recipies/LabelGraph.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include <cstddef>
+
+using namespace Aidge;
+
+TEST_CASE("[LabelGraph] conv") {
+    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3");
+    auto g1 = std::make_shared<GraphView>("TestGraph");
+    dataProvider->addChild(conv1, 0);
+    g1->add(conv1);
+    g1->addChild(conv2, conv1, 0);
+    g1->addChild(conv3, conv2, 0);
+    g1->save("LabelGraph_conv_graph");
+
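+    // labelGraph() is expected to map each Conv onto a MaxPooling with the
+    // same kernel size and stride, suited to propagating label maps.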
+    auto g2 = labelGraph(g1);
+
+    auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider");
+    dataProvider2->addChild(g2->getNode("conv1"), 0);
+
+    g2->forwardDims();
+    g2->save("LabelGraph_conv_label");
+
+    SECTION("Check resulting nodes") {
+        REQUIRE(g2->getNodes().size() == 3);
+        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+    }
+}
+
+TEST_CASE("[LabelGraph] deleted node") {
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        Conv(3, 32, {3, 3}, "conv1"),
+        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        Conv(32, 64, {3, 3}, "conv2"),
+        Conv(64, 10, {1, 1}, "conv3", {2, 2})
+    });
+
+    g1->save("LabelGraph_deleted_graph");
+
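+    // The GenericOperator placeholder has no label-graph equivalent and is
+    // expected to be removed, leaving only the three pooling nodes.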
+    auto g2 = labelGraph(g1);
+
+    auto dataProvider2 = Producer({16, 1, 224, 224}, "dataProvider");
+    dataProvider2->addChild(g2->getNode("conv1"), 0);
+
+    g2->forwardDims();
+    g2->save("LabelGraph_deleted_label");
+
+    SECTION("Check resulting nodes") {
+        REQUIRE(g2->getNodes().size() == 3);
+        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+    }
+
+    SECTION("Check dimensions") {
+        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
+        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 220, 220}));
+        REQUIRE(g2->getNode("conv3")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 110, 110}));
+    }
+}
+
+TEST_CASE("[LabelGraph] deleted nodes") {
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        Conv(3, 32, {3, 3}, "conv1"),
+        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        Conv(32, 64, {3, 3}, "conv2"),
+        GenericOperator("Dummy_to_be_removed", 1, 1, 1),
+        Conv(64, 10, {1, 1}, "conv3")
+    });
+
+    g1->save("LabelGraph_deleteds_graph");
+
+    auto g2 = labelGraph(g1);
+
+    auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider");
+    dataProvider2->addChild(g2->getNode("conv1"), 0);
+
+    g2->forwardDims();
+    g2->save("LabelGraph_deleteds_label");
+
+    SECTION("Check resulting nodes") {
+        REQUIRE(g2->getNodes().size() == 3);
+        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv1")->getOperator()->getOutput(0) == g2->getNode("conv2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv2")->getOperator()->getOutput(0) == g2->getNode("conv3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+    }
+}
+
+TEST_CASE("[LabelGraph] pooling") {
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        AvgPooling({2, 2}, "pool1"),
+        MaxPooling({2, 2}, "pool2"),
+        MaxPooling({2, 2}, "pool3", {2, 2})
+    });
+
+    g1->save("LabelGraph_deleted_graph");
+
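+    // Both AvgPooling and MaxPooling are expected to map onto MaxPooling
+    // nodes with identical kernel sizes and strides.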
+    auto g2 = labelGraph(g1);
+
+    auto dataProvider2 = Producer({16, 1, 224, 224}, "dataProvider");
+    dataProvider2->addChild(g2->getNode("pool1"), 0);
+
+    g2->forwardDims();
+    g2->save("LabelGraph_pooling");
+
+    SECTION("Check resulting nodes") {
+        REQUIRE(g2->getNodes().size() == 3);
+        REQUIRE(g2->getNode("pool1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("pool1")->getOperator()->getOutput(0) == g2->getNode("pool2")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("pool2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("pool2")->getOperator()->getOutput(0) == g2->getNode("pool3")->getOperator()->getInput(0));
+        REQUIRE(g2->getNode("pool3")->getOperator()->type() == "MaxPooling");
+    }
+
+    SECTION("Check dimensions") {
+        REQUIRE(g2->getNode("pool1")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 223, 223}));
+        REQUIRE(g2->getNode("pool2")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 222, 222}));
+        REQUIRE(g2->getNode("pool3")->getOperator()->getOutput(0)->dims() == std::vector<DimSize_t>({16, 1, 111, 111}));
+    }
+}
diff --git a/unit_tests/utils/Test_StaticAttributes.cpp b/unit_tests/utils/Test_StaticAttributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..36c2e0454b415e1cb25cc3581016530a372b9e65
--- /dev/null
+++ b/unit_tests/utils/Test_StaticAttributes.cpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <string>
+#include <vector>
+
+#include "aidge/utils/StaticAttributes.hpp"
+
+using namespace Aidge;
+
+enum class TestAttr { a, b, c, d };
+
+namespace {
+template <>
+const char *const EnumStrings<TestAttr>::data[] = {
+    "a",
+    "b",
+    "c",
+    "d"
+};
+}
+
+using Attributes_ = StaticAttributes<TestAttr, int, float, std::string, std::vector<bool>>;
+template <TestAttr e>
+using attr = typename Attributes_::template attr<e>;
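+// attr<e>(value) tags a constructor argument with the attribute entry it
+// initializes; it is used below to build the StaticAttributes instance.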
+
+TEST_CASE("[core/attributes] StaticAttribute") {
+    SECTION("TestAttr") {
+        Attributes_ attrs(
+            attr<TestAttr::a>(42),
+            attr<TestAttr::b>(18.75),
+            attr<TestAttr::c>("test"),
+            attr<TestAttr::d>({true, false, true}));
+
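+        // Lookup is by string name, resolved through the EnumStrings mapping
+        // above; requesting an unknown name should throw.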
+        REQUIRE(attrs.getAttr<int>("a") == 42);
+        REQUIRE_THROWS(attrs.getAttr<int>("nonexistent"));
+    }
+}