diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index 73b85c8a409e675c849b9ca66557c63b5acf6359..cd56a55fa7e9cbcefba4715188fd270462e81976 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -27,6 +27,8 @@ build:ubuntu_python:
     - python3 -m pip install virtualenv
     - virtualenv venv
     - source venv/bin/activate
+    # NumPy dependency for unit tests
+    - python3 -m pip install numpy
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 45ad49ba7208780ae33f5168f468ef37fca57925..286244bbb7d9b2ac065c3e3fbe96f7dc5115cbfa 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -66,12 +66,14 @@ endif()
 
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
+
 if (DOSANITIZE STREQUAL "ON")
 set(SANITIZE_FLAGS -fsanitize=address,leak,undefined,float-divide-by-zero -fno-omit-frame-pointer)
 else()
 set(SANITIZE_FLAGS)
 endif()
 
+
 target_compile_options(${module_name} PRIVATE
 $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
 -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -fstrict-aliasing -Wstrict-aliasing=1 $<$<BOOL:${WERROR}>:-Werror> ${SANITIZE_FLAGS}>)
diff --git a/README.md b/README.md
index 992344a796a4634a25d2127fc49b57adeae45863..5b07e147cb05c2fa1a6d275d567dda218b131996 100644
--- a/README.md
+++ b/README.md
@@ -6,16 +6,19 @@ You can find here the C++ code of the Core library of Aidge.
 
 ## Pip installation
 
-To install aidge_core using pip, make sure to set the desired install path :
-``` bash 
-export AIDGE_INSTALL = '<path_to_aidge>/install'
-```
 
-Then run in your python environnement :
+
+To install aidge_core using pip, run the following command in your Python environment:
 ``` bash
 pip install . -v
 ```
 
+**Note:** you can specify a custom install folder by setting an environment variable:
+
+``` bash
+export AIDGE_INSTALL='<path_to_aidge>/install'
+```
+
 ## Standard C++ Compilation
 
 Create two directories ``build`` and ``install``.
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index b326e0748c2c77612dd79122fe891a6207d945dc..fc60f52274162155f8f891bf86c22c9a13b241f4 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -30,36 +30,77 @@ class test_operator_binding(unittest.TestCase):
         self.assertNotEqual(gop.name(), "")
 
     def test_param_bool(self):
-        self.generic_operator.add_parameter("bool", True)
-        self.assertEqual(self.generic_operator.get_parameter("bool"), True)
+        self.generic_operator.add_attr("bool", True)
+        self.assertEqual(self.generic_operator.has_attr("bool"), True)
+        self.assertEqual(self.generic_operator.get_attr("bool"), True)
+        self.assertEqual(self.generic_operator.get_attr_type("bool"), "bool")
+        self.assertEqual(self.generic_operator.get_attrs_name(), {"bool"})
+        self.generic_operator.del_attr("bool")
+        self.assertEqual(self.generic_operator.has_attr("bool"), False)
+        self.assertEqual(len(self.generic_operator.get_attrs_name()), 0)
 
     def test_param_int(self):
-        self.generic_operator.add_parameter("int", 1)
-        self.assertEqual(self.generic_operator.get_parameter("int"), 1)
+        self.generic_operator.add_attr("int", 1)
+        self.assertEqual(self.generic_operator.get_attr("int"), 1)
 
     def test_param_float(self):
-        self.generic_operator.add_parameter("float", 2.0)
-        self.assertEqual(self.generic_operator.get_parameter("float"), 2.0)
+        self.generic_operator.add_attr("float", 2.0)
+        self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
 
     def test_param_str(self):
-        self.generic_operator.add_parameter("str", "value")
-        self.assertEqual(self.generic_operator.get_parameter("str"), "value")
+        self.generic_operator.add_attr("str", "value")
+        self.assertEqual(self.generic_operator.get_attr("str"), "value")
 
     def test_param_l_int(self):
-        self.generic_operator.add_parameter("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
-        self.assertEqual(self.generic_operator.get_parameter("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
 
     def test_param_l_bool(self):
-        self.generic_operator.add_parameter("l_bool", [True, False, False, True])
-        self.assertEqual(self.generic_operator.get_parameter("l_bool"), [True, False, False, True])
+        self.generic_operator.add_attr("l_bool", [True, False, False, True])
+        self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
 
     def test_param_l_float(self):
-        self.generic_operator.add_parameter("l_float", [2.0, 1.0])
-        self.assertEqual(self.generic_operator.get_parameter("l_float"), [2.0, 1.0])
+        self.generic_operator.add_attr("l_float", [2.0, 1.0])
+        self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
 
     def test_param_l_str(self):
-        self.generic_operator.add_parameter("l_str", ["ok"])
-        self.assertEqual(self.generic_operator.get_parameter("l_str"), ["ok"])
+        self.generic_operator.add_attr("l_str", ["ok"])
+        self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
+
+    def test_dynamicattribute_binding(self):
+        # Check that the original C++ attributes are bound
+        attrs = aidge_core.test_DynamicAttributes_binding()
+        self.assertEqual(attrs.has_attr("a"), True)
+        self.assertEqual(attrs.get_attr("a"), 42)
+        self.assertEqual(attrs.has_attr("b"), True)
+        self.assertEqual(attrs.get_attr("b"), "test")
+        self.assertEqual(attrs.has_attr("c"), True)
+        self.assertEqual(attrs.get_attr("c"), [True, False, True])
+        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c"})
+        self.assertEqual(attrs.has_attr("d"), False)
+
+        # Add Python attributes
+        attrs.add_attr("d", 18.56)
+        self.assertEqual(attrs.get_attr("d"), 18.56)
+        self.assertEqual(attrs.has_attr("d"), True)
+        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c", "d"})
+        self.assertEqual(attrs.has_attr("e"), False)
+
+        # Check that added Python attribute is accessible in C++
+        # Return the value of an attribute named "d" of type float64 (double in C++)
+        self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 18.56)
+        attrs.set_attr("d", 23.89)
+        self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
+
+    def test_compute_output_dims(self):
+        in_dims=[25, 25]
+        input = aidge_core.Producer(in_dims, name="In")
+        genOp = aidge_core.GenericOperator("genOp", 1, 1, 1, name="genOp")
+        _ = aidge_core.sequential([input, genOp])
+        self.assertListEqual(genOp.get_operator().output(0).dims(), [])
+        genOp.get_operator().set_compute_output_dims(lambda x:x)
+        genOp.get_operator().compute_output_dims()
+        self.assertListEqual(genOp.get_operator().output(0).dims(), in_dims)
 
 if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
+    unittest.main()
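For orientation, a minimal sketch of the renamed attribute API exercised by the tests above; `"MyOp"` and `"myOp0"` are arbitrary example names:

```python
import aidge_core

# A generic operator with 1 data input, 1 input in total and 1 output.
node = aidge_core.GenericOperator("MyOp", 1, 1, 1, name="myOp0")
op = node.get_operator()

op.add_attr("flag", True)              # create a dynamic attribute
assert op.has_attr("flag")
assert op.get_attr("flag") == True
assert op.get_attr_type("flag") == "bool"
assert op.get_attrs_name() == {"flag"}

op.del_attr("flag")                    # attributes can be removed again
assert not op.has_attr("flag")

# Output dimensions can be computed by a Python callable (identity here).
op.set_compute_output_dims(lambda dims: dims)
```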
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index 02c7598820d2429bc49ff9a2f02c8ee841783173..566650713c36236c19763f466ee906970466c02e 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -11,7 +11,7 @@ SPDX-License-Identifier: EPL-2.0
 import unittest
 import aidge_core
 
-class test_parameters(unittest.TestCase):
+class test_attributes(unittest.TestCase):
     """Very basic test to make sure the python APi is not broken.
     Can be remove in later stage of the developpement.
     """
@@ -27,21 +27,21 @@ class test_parameters(unittest.TestCase):
         out_channels = 8
         k_dims = [2, 2]
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
-        self.assertEqual(conv_op.get("InChannels"), in_channels)
-        self.assertEqual(conv_op.get("OutChannels"), out_channels)
-        self.assertEqual(conv_op.get("KernelDims"), k_dims)
+        self.assertEqual(conv_op.get_attr("InChannels"), in_channels)
+        self.assertEqual(conv_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
 
     def test_fc(self):
         out_channels = 8
         nb_bias = True
         fc_op = aidge_core.FC(out_channels, nb_bias).get_operator()
-        self.assertEqual(fc_op.get("OutChannels"), out_channels)
-        self.assertEqual(fc_op.get("NoBias"), nb_bias)
+        self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_matmul(self):
         out_channels = 8
-        matmul_op = aidge_core.Matmul(out_channels).get_operator()
-        self.assertEqual(matmul_op.get("OutChannels"), out_channels)
+        matmul_op = aidge_core.MatMul(out_channels).get_operator()
+        self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
 
     def test_producer_1D(self):
         dims = [5]
@@ -71,7 +71,7 @@ class test_parameters(unittest.TestCase):
     def test_leaky_relu(self):
         negative_slope = 0.25
         leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
-        self.assertEqual(leakyrelu_op.get("NegativeSlope"), negative_slope)
+        self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipies.py
new file mode 100644
index 0000000000000000000000000000000000000000..754907443530f7e73d1e10ed9549d0c8eb78a011
--- /dev/null
+++ b/aidge_core/unit_tests/test_recipies.py
@@ -0,0 +1,78 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+class test_recipies(unittest.TestCase):
+    """
+    """
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_remove_flatten(self):
+        graph_view = aidge_core.sequential([
+            aidge_core.GenericOperator("Flatten", 1, 1, 1, name="Flatten0"),
+            aidge_core.FC(50, name='0')
+        ])
+        old_nodes = graph_view.get_nodes()
+        aidge_core.remove_flatten(graph_view)
+        self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 1)
+        self.assertTrue("Flatten0" not in [i.name for i in graph_view.get_nodes()])
+
+        self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
+
+    def test_fuse_matmul_add(self):
+        matmul0 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul0")
+        add0 = aidge_core.Add(name="Add0")
+        matmul1 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul1")
+        add1 = aidge_core.Add(name="Add1")
+
+        graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
+
+        w0 = aidge_core.Producer([1, 1], name="W0")
+        w0.add_child(matmul0, 0, 1)
+        graph_view.add(w0)
+
+        b0 = aidge_core.Producer([1], name="B0")
+        b0.add_child(add0, 0, 1)
+        graph_view.add(b0)
+
+        w1 = aidge_core.Producer([1, 1], name="W1")
+        w1.add_child(matmul1, 0, 1)
+        graph_view.add(w1)
+
+        b1 = aidge_core.Producer([1], name="B1")
+        b1.add_child(add1, 0, 1)
+        graph_view.add(b1)
+
+        old_nodes = graph_view.get_nodes()
+        aidge_core.fuse_mul_add(graph_view)
+
+        self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 2)
+        self.assertTrue("MatMul0" not in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("Add0" not in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("MatMul1" not in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("Add1" not in [i.name() for i in graph_view.get_nodes()])
+
+        self.assertTrue("W0" in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("B0" in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("W1" in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("B1" in [i.name() for i in graph_view.get_nodes()])
+        # TODO: check that the FC node was properly created
+
+if __name__ == '__main__':
+    unittest.main()
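A minimal usage sketch of the recipes exercised above, using the same aidge_core calls as the tests (node names are illustrative):

```python
import aidge_core

# A Flatten node followed by an FC: remove_flatten drops the Flatten node
# from the graph view in place.
graph_view = aidge_core.sequential([
    aidge_core.GenericOperator("Flatten", 1, 1, 1, name="Flatten0"),
    aidge_core.FC(50, name="fc0"),
])
aidge_core.remove_flatten(graph_view)
assert "Flatten0" not in [n.name() for n in graph_view.get_nodes()]

# fuse_mul_add(graph_view) is applied the same way to MatMul -> Add chains.
```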
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
new file mode 100644
index 0000000000000000000000000000000000000000..a214a0e354c64b515d0a7ac24d81c85e116938ca
--- /dev/null
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -0,0 +1,44 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+from functools import reduce
+import numpy as np
+
+class test_tensor(unittest.TestCase):
+    """
+    """
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_getcoord_getidx(self):
+        dims = [2,2,2]
+        size = reduce((lambda x, y: x*y), dims)
+
+        np_array = np.arange(size).reshape(dims)
+
+        t = aidge_core.Tensor(np_array)
+        for i in range(size):
+            coord = t.get_coord(i)
+            idx = t.get_idx(coord)
+            self.assertEqual(idx, i)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index cfda3ac7fa024f8cf80b4589d978b9b5bff5b4f0..47ded2a462477958320bfad3ad84e6b8f6ef6082 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -33,7 +33,7 @@
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Matmul.hpp"
+#include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 //#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -42,8 +42,9 @@
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
-#include "aidge/utils/CParameter.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Recipies.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index c56f66fc0b827ccccd9749b9880507dbf48c8179..dfe3d932ac68929acfd26ecf7126e07c4707bcfc 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -27,6 +27,9 @@ public:
     {
         printf("Cannot set raw pointer for backend %s\n", mBackend);
     };
+
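+    /// Return a raw pointer to the element at flat index idx (used by Tensor::get/set).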
+    virtual void* getRaw(std::size_t /*idx*/) = 0;
+
     virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes)
     constexpr const char *backend() const { return mBackend; }
     virtual ~TensorImpl() = default;
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 81b7810a8a548df7e5a2829b1a31cbe337491382..02f4df320d87d1bb02edfa5c11ffe8bc7f560986 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_DATA_H_
 #define AIDGE_DATA_H_
 
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
 
 namespace Aidge {
 enum class DataType {
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index c3a6e478f8943253a9f9b3565db2d4452a9ca133..7422a52eb171ee6dae0e14ad67c0562295fe5d8c 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -446,18 +446,33 @@ class Tensor : public Data,
      */
     bool empty() const { return mDims.empty(); }
 
-    template <typename expectedType, std::array<std::size_t, 1>::size_type DIM>
-    constexpr expectedType &get(std::array<std::size_t, DIM> idx) {
-        assert(DIM == mDims.size());
-        assert(mImpl);
-        std::size_t unfoldedIdx = 0;
-        for (std::size_t i = 0; i < DIM - std::size_t(1); ++i) {
-            unfoldedIdx = (unfoldedIdx + idx[i]) * mDims[i + 1];
-        }
-        unfoldedIdx += idx[DIM - 1];
-        return static_cast<expectedType *>(mImpl->rawPtr())[unfoldedIdx];
+    template <typename expectedType>
+    expectedType& get(std::size_t idx){
+        // TODO: assert that expectedType is compatible with the tensor's data type
+        // TODO: assert that idx < size()
+        return *reinterpret_cast<expectedType *>(mImpl->getRaw(idx));
+    }
+
+    template <typename expectedType>
+    expectedType& get(std::vector<std::size_t> coordIdx){
+        return get<expectedType>(getIdx(coordIdx));
+    }
+
+    template <typename expectedType>
+    void set(std::size_t idx, expectedType value){
+        // TODO: assert that expectedType is compatible with the tensor's data type
+        // TODO: assert that idx < size()
+        void* dataPtr = mImpl->getRaw(idx);
+        std::memcpy(dataPtr, &value, sizeof(expectedType));
     }
 
+    template <typename expectedType>
+    void set(std::vector<std::size_t> coordIdx, expectedType value){
+        set<expectedType>(getIdx(coordIdx), value);
+    }
+
     std::string toString() {
         if (dims().empty()) { return "{}"; }
         std::string res;
@@ -559,6 +574,42 @@ class Tensor : public Data,
         return mGrad;
     }
 
+    /**
+     * @brief From the 1D index, return the coordinates of an element in the tensor.
+     *
+     * @param flatIdx 1D index of the value, considering a flattened tensor.
+     * @return std::vector<std::size_t>
+     */
+    std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
+        std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
+        std::size_t idx = flatIdx;
+        for (std::size_t i = mDims.size() - 1; i > 0; --i){
+            coordIdx[i] = (idx % mDims[i]);
+            idx/=mDims[i];
+        }
+        coordIdx[0] = idx % mDims[0];
+        return coordIdx;
+    }
+
+    /**
+     * @brief From the coordinates, return the 1D index of an element in the tensor.
+     *
+     * @param coordIdx Coordinates of an element in the tensor.
+     * @return std::size_t
+     */
+    std::size_t getIdx(std::vector<std::size_t> coordIdx) const {
+        std::size_t flatIdx = 0;
+        assert(coordIdx.size() == mDims.size() && "Coordinates do not match the number of dimensions");
+        std::size_t i = 0;
+        for(; i < mDims.size() - 1; ++i){
+            assert(coordIdx[i] < mDims[i] && "Coordinates do not fit within the dimensions of the tensor");
+            flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1];
+        }
+        return flatIdx + coordIdx[i];
+    }
+
 private:
     ///\bug not protected against overflow
     std::size_t computeSize() {
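For reference, getIdx() and getCoord() implement standard row-major flattening and unflattening; a Python sketch of the same arithmetic (not part of the patch):

```python
def get_idx(dims, coord):
    # Row-major flattening: ((c0 * d1 + c1) * d2 + c2) ...
    idx = 0
    for c, d in zip(coord, dims):
        idx = idx * d + c
    return idx

def get_coord(dims, flat_idx):
    # Inverse: peel off the fastest-varying (last) dimension first.
    coord = [0] * len(dims)
    for i in range(len(dims) - 1, -1, -1):
        coord[i] = flat_idx % dims[i]
        flat_idx //= dims[i]
    return coord

assert get_idx([2, 2, 2], get_coord([2, 2, 2], 5)) == 5
```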
diff --git a/include/aidge/hook/execTime.hpp b/include/aidge/hook/ExecTime.hpp
similarity index 100%
rename from include/aidge/hook/execTime.hpp
rename to include/aidge/hook/ExecTime.hpp
diff --git a/include/aidge/hook/hook.hpp b/include/aidge/hook/Hook.hpp
similarity index 96%
rename from include/aidge/hook/hook.hpp
rename to include/aidge/hook/Hook.hpp
index f263ac0e451b52be36499b4d705af95105670d93..f874b269ef063a285b65d04e18d53262374e616d 100644
--- a/include/aidge/hook/hook.hpp
+++ b/include/aidge/hook/Hook.hpp
@@ -17,7 +17,7 @@
 #ifndef Hook_H_
 #define Hook_H_
 
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include <memory>
 
diff --git a/include/aidge/hook/outputRange.hpp b/include/aidge/hook/OutputRange.hpp
similarity index 100%
rename from include/aidge/hook/outputRange.hpp
rename to include/aidge/hook/OutputRange.hpp
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 303092911ae369473c1f3d6b7f122e3068d77028..1e0f17e6db9278e7edf2a11918472c084561a308 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -48,7 +48,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Add_Op(const Add_Op<NUM>& op)
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 2fbff53c30e376e80d07f0859851057177bf0868..b29463c675eb8516e02b83ad47816e9e9aa5d147 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class AvgPoolingParam { StrideDims, KernelDims, PaddingDims };
+enum class AvgPoolingAttr { StrideDims, KernelDims, PaddingDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public Operator,
                 public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
-                public Parameterizable<AvgPoolingParam,
+                public StaticAttributes<AvgPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, (DIM<<1) >> {
@@ -45,30 +45,30 @@ public:
 
     AvgPooling_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<AvgPoolingParam,
+    using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1)> >;
-    template <AvgPoolingParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template <AvgPoolingAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
         : Operator(Type),
-          Parameterizable_(param<AvgPoolingParam::StrideDims>(stride_dims),
-                           param<AvgPoolingParam::KernelDims>(kernel_dims),
-                           param<AvgPoolingParam::PaddingDims>(padding_dims)) {
+          Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
+                           attr<AvgPoolingAttr::KernelDims>(kernel_dims),
+                           attr<AvgPoolingAttr::PaddingDims>(padding_dims)) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     AvgPooling_Op(const AvgPooling_Op<DIM>& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -96,13 +96,13 @@ public:
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<AvgPoolingParam::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             std::floor(static_cast<float>(mInput->dims()[dim+2] -
-                                                                    this->template get<AvgPoolingParam::KernelDims>()[dim] +
-                                                                    this->template get<AvgPoolingParam::PaddingDims>()[dim] +
-                                                                    this->template get<AvgPoolingParam::PaddingDims>()[dim+DIM]) /
-                                            static_cast<float>(this->template get<AvgPoolingParam::StrideDims>()[dim])));
+                                                                    this->template getAttr<AvgPoolingAttr::KernelDims>()[dim] +
+                                                                    this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim] +
+                                                                    this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -189,7 +189,7 @@ inline std::shared_ptr<Node> AvgPooling(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::AvgPoolingParam>::data[] = {"StrideDims",
+const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                           "KernelDims", "PaddingDims"};
 }
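The per-dimension formula used by computeOutputDims() above, sketched in Python for a single spatial dimension (illustrative only; begin and end padding come from the two halves of PaddingDims):

```python
import math

def pool_out_dim(in_dim, kernel, stride, pad_begin=0, pad_end=0):
    # Mirrors AvgPooling_Op::computeOutputDims() for one dimension.
    return 1 + math.floor((in_dim - kernel + pad_begin + pad_end) / stride)

assert pool_out_dim(32, kernel=2, stride=2) == 16
```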
 
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index f1a6ae8f52141839f72211f23511a0607e2138b6..90a6be7222ee1b3e377520f2bc612a72c2ba4ab3 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class BatchNormParam { Epsilon, Momentum };
+enum class BatchNormAttr { Epsilon, Momentum };
 
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public Operator,
                 public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
-                public Parameterizable<BatchNormParam, float, float> {
+                public StaticAttributes<BatchNormAttr, float, float> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
@@ -44,24 +44,25 @@ public:
 
     BatchNorm_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<BatchNormParam, float, float>;
-    template <BatchNormParam e>
-    using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
+    template <BatchNormAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
         : Operator(Type),
-          Parameterizable_(param<BatchNormParam::Epsilon>(epsilon),
-                           param<BatchNormParam::Momentum>(momentum)) {
+          Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
+                           attr<BatchNormAttr::Momentum>(momentum)),
+          mOutput(std::make_shared<Tensor>()) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     BatchNorm_Op(const BatchNorm_Op<DIM>& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -97,7 +98,6 @@ public:
         if (!mInputs[0]->empty()) {
             for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
                 if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
-                    assert(!mInputs[0]->hasImpl() && "Incompatible size with already implemented learnable parameter");
                     mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
                 }
             }
@@ -178,7 +178,7 @@ inline std::shared_ptr<Node> BatchNorm(const float epsilon = 1.0e-5F,
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::BatchNormParam>::data[] = { "Epsilon", "Momentum" };
+const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
 
-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index e95b46ae5583df9e6b471dc4005d0d9c4636ca9b..22553080c6d4d8359149b3b34c5d040e5e900c4d 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvParam { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, PaddingDims };
+enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, PaddingDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public Operator,
                 public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
+                public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
                                        DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >> {
 public:
     // FIXME: change accessibility
@@ -44,10 +44,10 @@ public:
 
     Conv_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
+    using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
                                              DimSize_t, DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >>;
-    template <ConvParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template <ConvAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr Conv_Op(DimSize_t in_channels,
                       DimSize_t out_channels,
@@ -56,22 +56,22 @@ public:
                       const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                       const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameterizable_(param<ConvParam::StrideDims>(stride_dims),
-                           param<ConvParam::DilationDims>(dilation_dims),
-                           param<ConvParam::InChannels>(in_channels),
-                           param<ConvParam::OutChannels>(out_channels),
-                           param<ConvParam::KernelDims>(kernel_dims),
-                           param<ConvParam::PaddingDims>(padding_dims)) {
+          Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
+                           attr<ConvAttr::DilationDims>(dilation_dims),
+                           attr<ConvAttr::InChannels>(in_channels),
+                           attr<ConvAttr::OutChannels>(out_channels),
+                           attr<ConvAttr::KernelDims>(kernel_dims),
+                           attr<ConvAttr::PaddingDims>(padding_dims)) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Conv_Op(const Conv_Op<DIM>& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -111,19 +111,19 @@ public:
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<ConvParam::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template get<ConvParam::DilationDims>()[dim] *
-                                                       (this->template get<ConvParam::KernelDims>()[dim] - 1) +
+            for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+                const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                       (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                         floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
-                                                 this->template get<ConvParam::PaddingDims>()[dim] +
-                                                 this->template get<ConvParam::PaddingDims>()[dim+DIM]) /
-                              static_cast<float>(this->template get<ConvParam::StrideDims>()[dim])));
+                                                 this->template getAttr<ConvAttr::PaddingDims>()[dim] +
+                                                 this->template getAttr<ConvAttr::PaddingDims>()[dim+DIM]) /
+                              static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
             }
 
-            outputDims[1] = this->template get<ConvParam::OutChannels>();
+            outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
             outputDims[0] = mInputs[0]->dims()[0];
             mOutput->resize(outputDims);
         }
@@ -216,8 +216,14 @@ inline std::shared_ptr<Node> Conv(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "DilationDims", "InChannels", "OutChannels",
-                                                          "KernelDims", "PaddingDims"};
+const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
+    "StrideDims",
+    "DilationDims",
+    "InChannels",
+    "OutChannels",
+    "KernelDims",
+    "PaddingDims"
+};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
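Conv_Op::computeOutputDims() applies the same formula with the kernel extent enlarged by the dilation; a matching sketch (illustrative only):

```python
import math

def conv_out_dim(in_dim, kernel, stride=1, dilation=1, pad_begin=0, pad_end=0):
    kernel_extent = dilation * (kernel - 1) + 1  # dilated receptive field
    return 1 + math.floor((in_dim - kernel_extent + pad_begin + pad_end) / stride)

assert conv_out_dim(32, kernel=3) == 30              # plain convolution
assert conv_out_dim(32, kernel=3, dilation=2) == 28  # extent = 2*(3-1)+1 = 5
```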
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 12d15328cbabbe5b066fa2fb375adecd7935c889..7a4db68bae2f42eb892dd7240463e7363753b5a7 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseParam { StrideDims, DilationDims, Channels, KernelDims, PaddingDims };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, PaddingDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public Operator,
                 public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
-                public Parameterizable<ConvDepthWiseParam,
+                public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        DimSize_t,
@@ -48,35 +48,35 @@ class ConvDepthWise_Op : public Operator,
 
     ConvDepthWise_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<ConvDepthWiseParam,
+    using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              DimSize_t,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1) >>;
-    template <ConvDepthWiseParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template <ConvDepthWiseAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims),
-                           param<ConvDepthWiseParam::DilationDims>(dilation_dims),
-                           param<ConvDepthWiseParam::Channels>(0),
-                           param<ConvDepthWiseParam::KernelDims>(kernel_dims),
-                           param<ConvDepthWiseParam::PaddingDims>(padding_dims)) {
+          Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
+                           attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+                           attr<ConvDepthWiseAttr::Channels>(0),
+                           attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
+                           attr<ConvDepthWiseAttr::PaddingDims>(padding_dims)) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -103,19 +103,19 @@ class ConvDepthWise_Op : public Operator,
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<ConvDepthWiseParam::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template get<ConvDepthWiseParam::DilationDims>()[dim] *
-                                                       (this->template get<ConvDepthWiseParam::KernelDims>()[dim] - 1) +
+            for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+                const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                       (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                         floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
-                                                 this->template get<ConvDepthWiseParam::PaddingDims>()[dim] +
-                                                 this->template get<ConvDepthWiseParam::PaddingDims>()[dim+DIM]) /
-                              static_cast<float>(this->template get<ConvDepthWiseParam::StrideDims>()[dim])));
+                                                 this->template getAttr<ConvDepthWiseAttr::PaddingDims>()[dim] +
+                                                 this->template getAttr<ConvDepthWiseAttr::PaddingDims>()[dim+DIM]) /
+                              static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
             }
-            this->template get<ConvDepthWiseParam::Channels>() = mInputs[0]->dims()[1];
-            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template get<ConvDepthWiseParam::KernelDims>()));
+            this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
+            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
             // if (mInputs[1]->empty()) {
             //     mInputs[1]->resize(weightDims);
             // }
@@ -212,7 +212,7 @@ inline std::shared_ptr<Node> ConvDepthWise(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseParam>::data[] = {"StrideDims", "DilationDims", "Channels",
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
                                                           "KernelDims", "PaddingDims"};
 }
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 73cdab54c2cfade6fbd397d33d537b16cb5245f1..127d39a8bdfdd233cdac9e1ca6cf0bf85f656d16 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -23,17 +23,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class FCParam { OutChannels, NoBias };
+enum class FCAttr { OutChannels, NoBias };
 
 class FC_Op : public Operator,
               public Registrable<FC_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const FC_Op &)>,
-              public Parameterizable<FCParam, DimSize_t, bool> {
+              public StaticAttributes<FCAttr, DimSize_t, bool> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
@@ -44,25 +44,25 @@ public:
 
     FC_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<FCParam, DimSize_t, bool>;
-    template <FCParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<FCAttr, DimSize_t, bool>;
+    template <FCAttr e> using attr = typename Attributes_::template attr<e>;
 
     FC_Op(DimSize_t out_channels, bool noBias)
             : Operator(Type),
-            Parameterizable_(
-                param<FCParam::OutChannels>(out_channels),
-                param<FCParam::NoBias>(noBias))
+            Attributes_(
+                attr<FCAttr::OutChannels>(out_channels),
+                attr<FCAttr::NoBias>(noBias))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -82,7 +82,7 @@ public:
         assert(inputIdx < 3 && "operator supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         if (inputIdx == 2) {
-            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template get<FCParam::NoBias>()) == false ? static_cast<std::size_t>(this->template get<FCParam::OutChannels>()) : 0));
+            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0));
             assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
         }
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
@@ -93,9 +93,9 @@ public:
     void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {this->template get<FCParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
+            std::array<DimSize_t, 2> weightDims = {this->template getAttr<FCAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
             // <out_channels, batch>
-            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()};
+            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<FCAttr::OutChannels>()};
 
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
@@ -171,7 +171,7 @@ inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, con
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels",
+const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 8b95a219d0cbdf35c1c9e06fde1978c07c278ca4..83ca4e06d6157955c8514ef6254bc344094946c5 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -20,7 +20,7 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/CParameter.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
@@ -28,11 +28,11 @@
 namespace Aidge {
 class GenericOperator_Op
     : public Operator,
-      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
+      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
+      public DynamicAttributes {
    private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
-    CParameter mParams;
     IOIndex_t mNbDataIn;
     IOIndex_t mNbIn;
     IOIndex_t mNbOut;
@@ -55,11 +55,11 @@ class GenericOperator_Op
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op)
-        : Operator(op.type().c_str()), mParams(op.mParams), mNbDataIn(op.mNbDataIn), mNbIn(op.mNbIn), mNbOut(op.mNbOut)
+        : Operator(op.type().c_str()), mNbDataIn(op.mNbDataIn), mNbIn(op.mNbIn), mNbOut(op.mNbOut)
     {
         // cpy-ctor
         mInputs = std::vector<std::shared_ptr<Tensor>>(mNbIn);
@@ -80,41 +80,6 @@ class GenericOperator_Op
         return std::make_shared<GenericOperator_Op>(*this);
     }
 
-    /**
-     * @brief Get the Parameter object identified by its name.
-     * @tparam T expected parameter type.
-     * @param key Parameter name.
-     * @details assert if T is not the actual parameter type, if the parameter
-     * does not exist or internal parameter position is invalid.
-     * @todo Returning a T const& ? But dangerous => may get an address within
-     * param buffer that will get invalid after the CParam death.
-     * @note at() throws if the parameter does not exist, using find to test
-     * for parameter existance
-     * @return template<class T> The parameter.
-     */
-    template <class T>
-    const T& getParameter(std::string const &key) const {
-        return mParams.Get<const T>(key);
-    }
-
-    template <class T>
-    T& getParameter(std::string const &key) {
-        return mParams.Get<T>(key);
-    }
-
-    ///\brief Add a parameter value, identified by its name
-    ///\tparam T expected parameter type
-    ///\param i_ParamName Parameter name
-    ///\param i_Value Parameter value
-    ///\todo Pass i_Value by ref if large or not trivial
-    ///\bug If parameter already exists, its value is changed but written in the
-    /// internal buffer in a new location (previous value is still in memory at
-    /// its previous location)
-    template <class T>
-    void addParameter(std::string const &key, T&& value) {
-        mParams.Add<T>(key, std::forward<T>(value));
-    }
-
     // Helper functions that can be used with setComputeOutputDims():
     static const ComputeDimsFunc Identity;
 
@@ -122,10 +87,6 @@ class GenericOperator_Op
         mComputeOutputDims = func;
     }
 
-    std::string getParameterType(std::string const &key) { return mParams.getParamType(key); }
-
-    std::vector<std::string> getParametersName() { return mParams.getParametersName(); }
-
     // Override Virtual Operator methods
     void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < mNbIn && "operator supports only x inputs");
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index dc9548515134a68ad28a8b58213b536cd43fc406..c6ee01239e1ed065587276c1891d26ba3899fe89 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -15,7 +15,7 @@
 #include <vector>
 #include <memory>
 
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
@@ -25,13 +25,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class LeakyReLUParam {
+enum class LeakyReLUAttr {
     NegativeSlope
 };
 
 class LeakyReLU_Op : public Operator,
     public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
-    public Parameterizable<LeakyReLUParam, float> {
+    public StaticAttributes<LeakyReLUAttr, float> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -42,24 +42,24 @@ public:
 
     LeakyReLU_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<LeakyReLUParam, float>;
-    template <LeakyReLUParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
+    template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
 
     LeakyReLU_Op(float negativeSlope)
             : Operator(Type),
-            Parameterizable_(
-                param<LeakyReLUParam::NegativeSlope>(negativeSlope))
+            Attributes_(
+                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -147,7 +147,7 @@ inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::st
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::LeakyReLUParam>::data[]
+const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
     = {"NegativeSlope"};
 }
 
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/MatMul.hpp
similarity index 74%
rename from include/aidge/operator/Matmul.hpp
rename to include/aidge/operator/MatMul.hpp
index 54bbcb267f346fd79a2b9e3a8aca571ed2e6ba91..d0dadd847a59c9d2a1c0dd97f2f200437da71863 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -23,57 +23,57 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class MatmulParam { OutChannels };
+enum class MatMulAttr { OutChannels };
 
-class Matmul_Op : public Operator,
-              public Registrable<Matmul_Op,
+class MatMul_Op : public Operator,
+              public Registrable<MatMul_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const Matmul_Op &)>,
-              public Parameterizable<MatmulParam, DimSize_t> {
+                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
+              public StaticAttributes<MatMulAttr, DimSize_t> {
 public:
     std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
     const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
-    static constexpr const char* Type = "Matmul";
+    static constexpr const char* Type = "MatMul";
 
-    Matmul_Op() = delete;
+    MatMul_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<MatmulParam, DimSize_t>;
-    template <MatmulParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<MatMulAttr, DimSize_t>;
+    template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
 
-    Matmul_Op(DimSize_t out_channels)
+    MatMul_Op(DimSize_t out_channels)
             : Operator(Type),
-            Parameterizable_(
-                param<MatmulParam::OutChannels>(out_channels))
+            Attributes_(
+                attr<MatMulAttr::OutChannels>(out_channels))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Matmul_Op(const Matmul_Op& op)
+    MatMul_Op(const MatMul_Op& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
-        mImpl = op.mImpl ? Registrar<Matmul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Matmul_Op
+     * @see Operator::MatMul_Op
      */
     std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Matmul_Op>(*this);
+        return std::make_shared<MatMul_Op>(*this);
     }
 
     void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
@@ -85,9 +85,9 @@ public:
     void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
-            // <in_features**, out_channels>
+            // <out_channels, in_features>
-            std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template get<MatmulParam::OutChannels>()};
+            std::array<DimSize_t, 2> weightDims = {this->template getAttr<MatMulAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
-            // <out_channels, batch>
+            // <batch, out_channels>
-            std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()};
+            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()};
 
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
@@ -128,7 +128,7 @@ public:
 
 
     void setBackend(const std::string& name) {
-        mImpl = Registrar<Matmul_Op>::create(name)(*this);
+        mImpl = Registrar<MatMul_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
         // FIXME: temporary workaround
@@ -150,17 +150,17 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
-    addProducer(matmul, 1, {1, out_channels}, "w");
+inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
+    // FIXME: properly handle default w initialization in every case
+    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
+    addProducer(matmul, 1, {out_channels, 1}, "w");
     return matmul;
 }
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
+const char *const EnumStrings<Aidge::MatMulAttr>::data[] = {"OutChannels"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
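
Reviewer note: with this rename, `MatMul_Op` stores its weight as `<out_channels, in_features>` and produces a `<batch, out_channels>` output. A minimal usage sketch, assuming `Node::getOperator()` returns the underlying operator as elsewhere in Aidge and that the `getAttr` accessor is public, as its use inside this header suggests (values are illustrative):

```cpp
#include <memory>
#include "aidge/operator/MatMul.hpp"

void matMulExample() {
    // Factory: creates the node plus its <out_channels, 1> weight Producer.
    std::shared_ptr<Aidge::Node> fc = Aidge::MatMul(32, "fc1");

    // Attribute access now goes through getAttr<> instead of get<>.
    auto op = std::static_pointer_cast<Aidge::MatMul_Op>(fc->getOperator());
    const Aidge::DimSize_t outChannels =
        op->getAttr<Aidge::MatMulAttr::OutChannels>();  // 32
    (void)outChannels;
}
```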
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 775583fd4c2132a5474d136c60c1b53b47ea4c3d..eae7e30df039c0514443e567032427f7a6556360 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
+enum class MaxPoolingAttr { StrideDims, KernelDims, PaddingDims };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public Operator,
                 public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
-                public Parameterizable<MaxPoolingParam,
+                public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, (DIM<<1) >> {
@@ -45,31 +45,31 @@ public:
 
     MaxPooling_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<MaxPoolingParam,
+    using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1)> >;
-    template <MaxPoolingParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template <MaxPoolingAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
         : Operator(Type),
-          Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
-                           param<MaxPoolingParam::KernelDims>(kernel_dims),
-                           param<MaxPoolingParam::PaddingDims>(padding_dims)),
+          Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
+                           attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+                           attr<MaxPoolingAttr::PaddingDims>(padding_dims)),
           mOutput(std::make_shared<Tensor>()) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -97,13 +97,13 @@ public:
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             std::floor(static_cast<float>(mInput->dims()[dim+2] -
-                                                                    this->template get<MaxPoolingParam::KernelDims>()[dim] +
-                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim] +
-                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
-                                            static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
+                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim] +
+                                                                    this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim] +
+                                                                    this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -190,7 +190,7 @@ inline std::shared_ptr<Node> MaxPooling(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
+const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
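
Reviewer note: the pooling arithmetic itself is untouched by the rename; for reference, a self-contained check of the per-dimension output-size formula used in `computeOutputDims()` (plain integers, no Aidge types):

```cpp
#include <cmath>
#include <cstddef>
#include <iostream>

int main() {
    // out = 1 + floor((in - kernel + padBegin + padEnd) / stride)
    const std::size_t in = 32, kernel = 3, padBegin = 1, padEnd = 1, stride = 2;
    const std::size_t out = 1 + static_cast<std::size_t>(
        std::floor(static_cast<float>(in - kernel + padBegin + padEnd) /
                   static_cast<float>(stride)));
    std::cout << out << std::endl;  // 16 for these values
    return 0;
}
```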
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 9e12b159888923cfea10dd02b7b267a46abcb3b7..0c77a752493d251303c036c4061823c4f8bc499d 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -23,7 +23,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MetaOperator(const MetaOperator& op)
@@ -34,7 +34,7 @@ public:
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Matmul_Op
+     * @see Operator::MetaOperator
      */
     std::shared_ptr<Operator> clone() const override {
         return std::make_shared<MetaOperator>(*this);
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 3ac651cfd6f700a129e36fb461f948f50137cfd6..5b0c199e75f0cedd4a0d36f6d2c87d89833e0dd5 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -20,7 +20,7 @@
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/hook/hook.hpp"
+#include "aidge/hook/Hook.hpp"
 
 namespace Aidge {
 
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index fbab24a0d23712b138c41e969372701fdb3d749e..593192c9f402e2646ac94cff68aa0c805f5aecd1 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -19,7 +19,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
@@ -51,7 +51,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Producer_Op(const Producer_Op& op)
@@ -75,6 +75,16 @@ public:
         assert(false && "Producer operator takes no input");
     }
 
+    /**
+     * @brief Set the Output Tensor of the Producer operator.
+     * This method creates a copy of the Tensor.
+     *
+     * @param newOutput Tensor containing the values to copy.
+     */
+    void setOutputTensor(const Tensor& newOutput) {
+        *mOutput = newOutput;
+    }
+
     void computeOutputDims() override final {}
 
     bool outputDimsForwarded() const override final {return true;}
@@ -163,4 +173,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
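
Reviewer note: the new `setOutputTensor()` gives a way to refill a `Producer` with fresh values after construction. A minimal sketch, assuming the node was created by one of the `Producer` factories in this header (the helper name is hypothetical):

```cpp
#include <memory>
#include "aidge/operator/Producer.hpp"

// Hypothetical helper: deep-copies newValues into the Producer's output.
void refreshProducer(const std::shared_ptr<Aidge::Node>& producerNode,
                     const Aidge::Tensor& newValues) {
    auto op = std::static_pointer_cast<Aidge::Producer_Op>(producerNode->getOperator());
    op->setOutputTensor(newValues);  // *mOutput = newValues (copy, not aliasing)
}
```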
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index cebfa5718886ec26871462f48edcdbc28117da59..433e353f05f8b4ffc3cfc0e047464e7f9257da02 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -43,7 +43,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ReLU_Op(const ReLU_Op& op)
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index e3cba81a490d3b4b28dd3754df7d274eb2e3519a..0ea6ba39b3e4def2011ae5c7b2b9c348df5e2929 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -17,7 +17,7 @@
 
 
 
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
@@ -27,13 +27,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ScalingParam {
+enum class ScalingAttr {
     scalingFactor
 };
 
 class Scaling_Op : public Operator,
     public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
-    public Parameterizable<ScalingParam, float> {
+    public StaticAttributes<ScalingAttr, float> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -44,24 +44,24 @@ public:
 
     Scaling_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<ScalingParam, float>;
-    template <ScalingParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<ScalingAttr, float>;
+    template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
 
     Scaling_Op(float scalingFactor)
             : Operator(Type),
-            Parameterizable_(
-                param<ScalingParam::scalingFactor>(scalingFactor))
+            Attributes_(
+                attr<ScalingAttr::scalingFactor>(scalingFactor))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Scaling_Op(const Scaling_Op& op)
         : Operator(Type),
-          Parameterizable_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -155,7 +155,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::ScalingParam>::data[]
+const char* const EnumStrings<Aidge::ScalingAttr>::data[]
     = {"scalingFactor"};
 }
 
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index ffaf0001fbaadf7dc700fca43d77b9998ab26eb2..898bae4c31bb2c41947523a86bfb9cd5c7b732b4 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -43,7 +43,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Softmax_Op(const Softmax_Op& op)
diff --git a/include/aidge/utils/Any.hpp b/include/aidge/utils/Any.hpp
index 0310c38ccd855f64c8485a114962738203f03ef5..0e65710596d31920de60a35d600e7ae612ea2bc4 100644
--- a/include/aidge/utils/Any.hpp
+++ b/include/aidge/utils/Any.hpp
@@ -1,154 +1,552 @@
-
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
+/**
+ * Origin: https://github.com/claudiofantacci/any
+ * 
+ * Implementation of N4562 std::experimental::any (merged into C++17 as std::any)
+ * for C++11 compilers.
  *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
+ * See also:
+ *   + http://en.cppreference.com/w/cpp/any
+ *   + http://en.cppreference.com/w/cpp/experimental/any
+ *   + http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4562.html#any
+ *   + https://cplusplus.github.io/LWG/lwg-active.html#2509
  *
- * SPDX-License-Identifier: EPL-2.0
+ * Copyright (c) 2016 Denilson das Mercês Amorim
+ * Copyright (c) 2018 Claudio Fantacci
  *
- ********************************************************************************/
-
-#ifndef AIDGE_ANY_H_
-#define AIDGE_ANY_H_
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE.md or copy at http://www.boost.org/LICENSE_1_0.txt)
+ */
 
+#ifndef AIDGE_CORE_UTILS_ANY_H_
+#define AIDGE_CORE_UTILS_ANY_H_
 
-#include <typeinfo>    // typeid
-#include <type_traits> // std::enable_if_t, std::decay_t, std::is_same, std::is_copy_constructible, std::remove_cv, std::remove_reference
-#include <assert.h>
-#include <new>
+#include <stdexcept>
+#include <typeinfo>
+#include <type_traits>
+#include <utility>
 
-class _any {
-private:
-    /// @brief Operation to perform on the object.
-    enum _Op { _Op_access, _Op_get_type_info, _Op_clone, _Op_destroy };
 
-    union _Arg {
-        const std::type_info* _M_typeinfo;
-        _any* _M_any;
-    };
+namespace libany
+{
 
-    /// @brief Stored data without type information.
-    void* _M_data;
+class bad_any_cast : public std::bad_cast
+{
+public:
+    const char* what() const noexcept override
+    {
+        return "bad any_cast";
+    }
+};
 
-    /// @brief Member function to perform type-related computations on stored data.
-    void (*_M_manager)(_Op, const _any*, _Arg*);
 
+class any final
+{
 public:
-    /// @brief Class to centralize functions and type information in a memory efficient way.
-    /// @tparam Tp Decayed stored type.
-    template <typename Tp>
-    struct Manager {
-        static void manage(_Op which, const _any* __any, _Arg* __arg) {
-            auto ptr = static_cast<const Tp*>(__any->_M_data);
-            switch (which)
-            {
-            case _Op_get_type_info:
-                __arg->_M_typeinfo = &typeid(Tp);
-                break;
-            case _Op_clone:
-                __arg->_M_any->_M_data = new Tp(*ptr);
-                __arg->_M_any->_M_manager = __any->_M_manager;
-                break;
-            case _Op_destroy:
-                delete ptr;
-                break;
-            }
+    /**
+     * Constructs an object of type any with an empty state.
+     */
+    any() :
+        vtable(nullptr)
+    { }
 
+
+    /**
+     * Constructs an object of type any with an equivalent state as other.
+     */
+    any(const any& rhs) :
+        vtable(rhs.vtable)
+    {
+        if(rhs.has_value())
+        {
+            rhs.vtable->copy(rhs.storage, this->storage);
         }
-        static Tp* access(const _any* __any) {
-            return static_cast<Tp*>(__any->_M_data);
+    }
+
+
+    /**
+     * Constructs an object of type any with a state equivalent to the original state of other.
+     * rhs is left in a valid but otherwise unspecified state.
+     */
+    any(any&& rhs) noexcept :
+        vtable(rhs.vtable)
+    {
+        if(rhs.has_value())
+        {
+            rhs.vtable->move(rhs.storage, this->storage);
+            rhs.vtable = nullptr;
         }
+    }
 
-        // template <typename Up>
-        // static void create(void* data, Up&& value) {
-        //     data = new Tp(std::forward<Up>(value));
-        // }
-    };
 
-private:
-    template<typename _Tp, typename _VTp = std::decay_t<_Tp>>
-    using _Decay_if_not_any = std::enable_if_t<!std::is_same<_VTp, _any>::value, _VTp>;
+    /**
+     * Same effect as this->clear().
+     */
+    ~any()
+    {
+        this->reset();
+    }
 
-public:
-    /// @brief Default constructor
-    _any() noexcept : _M_manager(nullptr) { }
 
-    /// @brief Copy constructor
-    /// @param __other
-    _any(const _any& __other)
+    /**
+     * Constructs an object of type any that contains an object of type T direct-initialized with std::forward<ValueType>(value).
+     * T shall satisfy the CopyConstructible requirements, otherwise the program is ill-formed.
+     * This is because an `any` may be copy constructed into another `any` at any time, so a copy should always be allowed.
+     */
+    template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
+    any(ValueType&& value)
     {
-        if (!__other._M_manager)
-            _M_manager = nullptr;
-        else
+        static_assert(std::is_copy_constructible<typename std::decay<ValueType>::type>::value,
+                      "T shall satisfy the CopyConstructible requirements.");
+        this->construct(std::forward<ValueType>(value));
+    }
+
+
+    /**
+     * Has the same effect as any(rhs).swap(*this). No effects if an exception is thrown.
+     */
+    any& operator=(const any& rhs)
+    {
+        any(rhs).swap(*this);
+        return *this;
+    }
+
+
+    /**
+     * Has the same effect as any(std::move(rhs)).swap(*this).
+     * The state of *this is equivalent to the original state of rhs and rhs is left in a valid
+     * but otherwise unspecified state.
+     */
+    any& operator=(any&& rhs) noexcept
+    {
+        any(std::move(rhs)).swap(*this);
+        return *this;
+    }
+
+
+    /**
+     * Has the same effect as any(std::forward<ValueType>(value)).swap(*this). No effect if an exception is thrown.
+     * T shall satisfy the CopyConstructible requirements, otherwise the program is ill-formed.
+     * This is because an `any` may be copy constructed into another `any` at any time, so a copy should always be allowed.
+     */
+    template<typename ValueType, typename = typename std::enable_if<!std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
+    any& operator=(ValueType&& value)
+    {
+        static_assert(std::is_copy_constructible<typename std::decay<ValueType>::type>::value, "T shall satisfy the CopyConstructible requirements.");
+        any(std::forward<ValueType>(value)).swap(*this);
+        return *this;
+    }
+
+
+    /**
+     * If not empty, destroys the contained object.
+     */
+    void reset() noexcept
+    {
+        if(has_value())
         {
-            _Arg __arg;
-            __arg._M_any = this;
-            __other._M_manager(_Op_clone, &__other, &__arg);
+            this->vtable->destroy(storage);
+            this->vtable = nullptr;
         }
     }
 
-    /// @brief Move constructor
-    /// @param __other
-    _any(_any&& __other)
+
+    /**
+     * Returns true if *this has no contained object, otherwise false.
+     */
+    bool has_value() const noexcept
+    {
+        return this->vtable != nullptr;
+    }
+
+
+    /**
+     * If *this has a contained object of type T, typeid(T); otherwise typeid(void).
+     */
+    const std::type_info& type() const noexcept
     {
-        if (!__other._M_manager)
-            _M_manager = nullptr;
+        return has_value()? this->vtable->type() : typeid(void);
+    }
+
+
+    /**
+     * Exchange the states of *this and rhs.
+     */
+    void swap(any& other) noexcept
+    {
+        if(this->vtable != other.vtable)
+        {
+            any tmp(std::move(other));
+
+            other.vtable = this->vtable;
+            if(this->vtable != nullptr)
+                this->vtable->move(this->storage, other.storage);
+
+            this->vtable = tmp.vtable;
+            if(tmp.vtable != nullptr)
+            {
+                tmp.vtable->move(tmp.storage, this->storage);
+                tmp.vtable = nullptr;
+            }
+        }
         else
         {
-            _M_data = __other._M_data;
-            _M_manager = __other._M_manager;
-            const_cast<_any*>(&__other)->_M_manager = nullptr;
+            if(this->vtable != nullptr)
+                this->vtable->swap(this->storage, other.storage);
         }
     }
 
-    /// @brief By-value constructor.
-    /// @tparam T Data type.
-    /// @tparam VT Decayed data type.
-    /// @param value
-    template<typename T, typename VT = _Decay_if_not_any<T>, std::enable_if_t<std::is_copy_constructible<VT>::value, bool> = true>
-    explicit _any(T&& value)
-        : _M_manager(&Manager<VT>::manage),
-          _M_data(new VT{std::forward<T>(value)})
-    {}
 
-    ~_any()
+private:
+    union storage_union
+    {
+        using stack_storage_t = typename std::aligned_storage<2 * sizeof(void*), std::alignment_of<void*>::value>::type;
+
+        void* dynamic;
+
+        stack_storage_t stack;
+    };
+
+
+    /**
+     * Base VTable specification.
+     *
+     * Note: The caller is responsible for doing .vtable = nullptr after destructive operations
+     * such as destroy() and/or move().
+     */
+    struct vtable_type
+    {
+        /**
+         * The type of the object this vtable is for.
+         */
+        const std::type_info& (*type)() noexcept;
+
+
+        /**
+         * Destroys the object in the union.
+         * The state of the union after this call is unspecified; the caller must ensure not to use src anymore.
+         */
+        void(*destroy)(storage_union&) noexcept;
+
+
+        /**
+         * Copies the **inner** content of the src union into the yet uninitialized dest union.
+         * As such, both inner objects will have the same state, but on separate memory locations.
+         */
+        void(*copy)(const storage_union& src, storage_union& dest);
+
+
+        /**
+         * Moves the storage from src to the yet uninitialized dest union.
+         * The state of src after this call is unspecified; the caller must ensure not to use src anymore.
+         */
+        void(*move)(storage_union& src, storage_union& dest) noexcept;
+
+
+        /**
+         * Exchanges the storage between lhs and rhs.
+         */
+        void(*swap)(storage_union& lhs, storage_union& rhs) noexcept;
+    };
+
+
+    /**
+     * VTable for dynamically allocated storage.
+     */
+    template<typename T>
+    struct vtable_dynamic
+    {
+        static const std::type_info& type() noexcept
+        {
+            return typeid(T);
+        }
+
+
+        static void destroy(storage_union& storage) noexcept
+        {
+            delete reinterpret_cast<T*>(storage.dynamic);
+        }
+
+
+        static void copy(const storage_union& src, storage_union& dest)
+        {
+            dest.dynamic = new T(*reinterpret_cast<const T*>(src.dynamic));
+        }
+
+
+        static void move(storage_union& src, storage_union& dest) noexcept
+        {
+            dest.dynamic = src.dynamic;
+            src.dynamic = nullptr;
+        }
+
+
+        static void swap(storage_union& lhs, storage_union& rhs) noexcept
+        {
+            std::swap(lhs.dynamic, rhs.dynamic);
+        }
+    };
+
+
+    /**
+     * VTable for stack allocated storage.
+     */
+    template<typename T>
+    struct vtable_stack
     {
-        if(_M_manager) {
-            _M_manager(_Op_destroy, this, nullptr);
-            _M_manager = nullptr;
+        static const std::type_info& type() noexcept
+        {
+            return typeid(T);
         }
+
+
+        static void destroy(storage_union& storage) noexcept
+        {
+            reinterpret_cast<T*>(&storage.stack)->~T();
+        }
+
+
+        static void copy(const storage_union& src, storage_union& dest)
+        {
+            new (&dest.stack) T(reinterpret_cast<const T&>(src.stack));
+        }
+
+
+        static void move(storage_union& src, storage_union& dest) noexcept
+        {
+            /**
+             * One of the conditions for using vtable_stack is a nothrow move constructor,
+             * so this move constructor will never throw an exception.
+             */
+            new (&dest.stack) T(std::move(reinterpret_cast<T&>(src.stack)));
+            destroy(src);
+        }
+
+
+        static void swap(storage_union& lhs, storage_union& rhs) noexcept
+        {
+            storage_union tmp_storage;
+            move(rhs, tmp_storage);
+            move(lhs, rhs);
+            move(tmp_storage, lhs);
+        }
+    };
+
+
+    /**
+     * Whether the type T must be dynamically allocated or can be stored on the stack.
+     */
+    template<typename T>
+    struct requires_allocation :
+        std::integral_constant<bool, !(std::is_nothrow_move_constructible<T>::value // N4562 6.3/3 [any.class]
+                                       && sizeof(T) <= sizeof(storage_union::stack)
+                                       && std::alignment_of<T>::value <= std::alignment_of<storage_union::stack_storage_t>::value)>
+    { };
+
+
+    /**
+     * Returns the pointer to the vtable of the type T.
+     */
+    template<typename T>
+    static vtable_type* vtable_for_type()
+    {
+        using VTableType = typename std::conditional<requires_allocation<T>::value, vtable_dynamic<T>, vtable_stack<T>>::type;
+        static vtable_type table = { VTableType::type, VTableType::destroy, VTableType::copy, VTableType::move, VTableType::swap };
+        return &table;
+    }
+
+
+protected:
+    template<typename T>
+    friend const T* any_cast(const any* operand) noexcept;
+
+
+    template<typename T>
+    friend T* any_cast(any* operand) noexcept;
+
+
+    /**
+     * Same effect as is_same(this->type(), t);
+     */
+    bool is_typed(const std::type_info& t) const
+    {
+        return is_same(this->type(), t);
+    }
+
+
+    /**
+     * Checks if two type infos are the same.
+     * If ANY_IMPL_FAST_TYPE_INFO_COMPARE is defined, checks only the address of the
+ * type infos, otherwise does an actual comparison. Checking addresses is
+     * only a valid approach when there's no interaction with outside sources
+     * (other shared libraries and such).
+     */
+    static bool is_same(const std::type_info& a, const std::type_info& b)
+    {
+#ifdef ANY_IMPL_FAST_TYPE_INFO_COMPARE
+        return &a == &b;
+#else
+        return a == b;
+#endif
+    }
+
+
+    /**
+     * Casts (with no type_info checks) the storage pointer as const T*.
+     */
+    template<typename T>
+    const T* cast() const noexcept
+    {
+        return requires_allocation<typename std::decay<T>::type>::value ? reinterpret_cast<const T*>(storage.dynamic) : reinterpret_cast<const T*>(&storage.stack);
     }
 
-    /// @brief Access type id of the value currently stored
-    /// @return
-    const std::type_info& type() const
+
+    /**
+     * Casts (with no type_info checks) the storage pointer as T*.
+     */
+    template<typename T>
+    T* cast() noexcept
     {
-        if (!_M_manager)
-            return typeid(void);
-        _Arg __arg;
-        _M_manager(_Op_get_type_info, this, &__arg);
-        return *__arg._M_typeinfo;
+        return requires_allocation<typename std::decay<T>::type>::value ? reinterpret_cast<T*>(storage.dynamic) : reinterpret_cast<T*>(&storage.stack);
+    }
+
+
+private:
+    storage_union storage; // On offset(0) so no padding for align
+
+    vtable_type* vtable;
+
+
+    template<typename ValueType, typename T>
+    typename std::enable_if<requires_allocation<T>::value>::type do_construct(ValueType&& value)
+    {
+        storage.dynamic = new T(std::forward<ValueType>(value));
+    }
+
+
+    template<typename ValueType, typename T>
+    typename std::enable_if<!requires_allocation<T>::value>::type do_construct(ValueType&& value)
+    {
+        new (&storage.stack) T(std::forward<ValueType>(value));
+    }
+
+
+    /**
+     * Chooses between stack and dynamic allocation for the type decay_t<ValueType>,
+     * assigns the correct vtable, and constructs the object on our storage.
+     */
+    template<typename ValueType>
+    void construct(ValueType&& value)
+    {
+        using T = typename std::decay<ValueType>::type;
+
+        this->vtable = vtable_for_type<T>();
+
+        do_construct<ValueType,T>(std::forward<ValueType>(value));
     }
 };
 
-/// @brief Access value stored in the object converted in the template type if possible.
-/// @tparam _ValueType
-/// @param __any
-/// @return Stored value.
-template<typename _ValueType>
-inline _ValueType any_cast(const _any& __any)
+
+namespace detail
+{
+    template<typename ValueType>
+    inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::true_type)
+    {
+        return std::move(*p);
+    }
+
+
+    template<typename ValueType>
+    inline ValueType any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::false_type)
+    {
+        return *p;
+    }
+}
+
+
+/**
+ * Performs *any_cast<add_const_t<remove_reference_t<ValueType>>>(&operand), or throws bad_any_cast on failure.
+ */
+template<typename ValueType>
+inline ValueType any_cast(const any& operand)
+{
+    auto p = any_cast<typename std::add_const<typename std::remove_reference<ValueType>::type>::type>(&operand);
+    if(p == nullptr) throw bad_any_cast();
+    return *p;
+}
+
+
+/**
+ * Performs *any_cast<remove_reference_t<ValueType>>(&operand), or throws bad_any_cast on failure.
+ */
+template<typename ValueType>
+inline ValueType any_cast(any& operand)
+{
+    auto p = any_cast<typename std::remove_reference<ValueType>::type>(&operand);
+    if(p == nullptr) throw bad_any_cast();
+    return *p;
+}
+
+
+/**
+ * If ANY_IMPL_ANY_CAST_MOVEABLE is not defined, does as N4562 specifies:
+ *     Performs *any_cast<remove_reference_t<ValueType>>(&operand), or throws bad_any_cast on failure.
+ *
+ * If ANY_IMPL_ANY_CAST_MOVEABLE is defined, does as LWG Defect 2509 specifies [1]:
+ *     If ValueType is MoveConstructible and isn't a lvalue reference, performs
+ *     std::move(*any_cast<remove_reference_t<ValueType>>(&operand)), otherwise
+ *     *any_cast<remove_reference_t<ValueType>>(&operand).
+ *     Throws bad_any_cast on failure.
+ *
+ *     [1] https://cplusplus.github.io/LWG/lwg-active.html#2509
+ */
+template<typename ValueType>
+inline ValueType any_cast(any&& operand)
+{
+#ifdef ANY_IMPL_ANY_CAST_MOVEABLE
+    using can_move = std::integral_constant<bool, std::is_move_constructible<ValueType>::value && !std::is_lvalue_reference<ValueType>::value>;
+#else
+    using can_move = std::false_type;
+#endif
+
+    auto p = any_cast<typename std::remove_reference<ValueType>::type>(&operand);
+    if(p == nullptr) throw bad_any_cast();
+    return detail::any_cast_move_if_true<ValueType>(p, can_move());
+}
+
+
+/**
+ * If operand != nullptr && operand->type() == typeid(ValueType), a pointer to the object
+ * contained by operand, otherwise nullptr.
+ */
+template<typename T>
+inline const T* any_cast(const any* operand) noexcept
+{
+    if(operand == nullptr || !operand->is_typed(typeid(T)))
+        return nullptr;
+    else
+        return operand->cast<T>();
+}
+
+
+/**
+ * If operand != nullptr && operand->type() == typeid(ValueType), a pointer to the object
+ * contained by operand, otherwise nullptr.
+ */
+template<typename T>
+inline T* any_cast(any* operand) noexcept
 {
-    using _Up =  std::remove_cv_t<std::remove_reference_t<_ValueType>>;
-    assert((std::__or_<std::is_reference<_ValueType>, std::is_copy_constructible<_ValueType>>::value && "Template argument must be a reference or CopyConstructible type"));
-    assert((std::is_constructible<_ValueType, const _Up&>::value && "Template argument must be constructible from a const value."));
-    assert(std::is_object<_Up>::value);
-    assert(__any.type() == typeid(_Up));
-    auto __p = static_cast<_Up*>(__any._M_data);
-    if (__p)
-        return static_cast<_ValueType>(*__p);
-    throw std::bad_cast();
+    if(operand == nullptr || !operand->is_typed(typeid(T)))
+        return nullptr;
+    else
+        return operand->cast<T>();
+}
+
+
+inline void swap(any& lhs, any& rhs) noexcept
+{
+    lhs.swap(rhs);
+}
+
 }
 
-#endif /* AIDGE_ANY_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_UTILS_ANY_H_ */
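
Reviewer note: the vendored `libany::any` is a drop-in for `std::any` on C++11/14 toolchains. A short self-contained usage sketch of the API defined above:

```cpp
#include <iostream>
#include <string>
#include "aidge/utils/Any.hpp"

int main() {
    libany::any a = std::string("hello");          // stored on the stack or dynamically
    std::cout << libany::any_cast<std::string>(a) << std::endl;

    a = 42;                                        // rebinding changes the stored type
    if (const int* p = libany::any_cast<int>(&a))  // pointer form: nullptr on mismatch
        std::cout << *p << std::endl;

    try {
        libany::any_cast<float>(a);                // value form: throws on mismatch
    } catch (const libany::bad_any_cast& e) {
        std::cout << e.what() << std::endl;        // "bad any_cast"
    }
    return 0;
}
```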
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..76875f15ff4229522e6208b0edb23ec519ff59ce
--- /dev/null
+++ b/include/aidge/utils/Attributes.hpp
@@ -0,0 +1,76 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_ATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_ATTRIBUTES_H_
+
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#endif
+#include <set>
+#include <string>
+#include <vector>
+
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
+namespace {
+// This is the type that will hold all the strings. Each enumerate type will
+// declare its own specialization.
+template <typename T> struct EnumStrings {
+    static const char* const data[];
+};
+}
+
+namespace Aidge {
+template<class T, std::size_t N>
+constexpr std::size_t size(T (&)[N]) { return N; }
+
+/* This abstract class makes it possible to avoid binding Attributes directly.
+*  Otherwise we would need to bind every possible template instantiation of Attributes.
+*  Every operator can access the methods of this class by inheriting from
+*  Attributes in the binding code.
+*/
+class Attributes {
+public:
+    /**
+     * @brief Check if the attribute exists.
+     * @param name Name of the attribute to check.
+     * @return bool True if the attribute exists, false otherwise.
+    */
+    virtual bool hasAttr(const std::string& name) const = 0;
+
+    /**
+     * @brief Get the (implementation defined) name of the type of an attribute, returned by std::type_info::name.
+     * @param name Name of the attribute.
+     * @return std::string Name of the type as returned by std::type_info::name.
+    */
+    virtual std::string getAttrType(const std::string& name) const = 0;
+
+    /**
+     * @brief Get the set of attribute names.
+     * @return std::set<std::string> Set of attribute names.
+    */
+    virtual std::set<std::string> getAttrsName() const = 0;
+
+#ifdef PYBIND
+    /* Bindable get function, does not require any templating.
+    *  This is thanks to py::object, which allows the function to
+    *  be agnostic of its return type.
+    */
+    virtual py::object getAttrPy(const std::string& name) const = 0;
+#endif
+    virtual ~Attributes() {}
+};
+}
+
+#endif /* AIDGE_CORE_UTILS_ATTRIBUTES_H_ */
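
Reviewer note: because `Attributes` is type-erased, generic code (and the Python bindings) can inspect any operator without knowing its attribute template. A sketch, assuming the concrete classes (`DynamicAttributes` below, and presumably `StaticAttributes`) implement this interface:

```cpp
#include <iostream>
#include <string>
#include "aidge/utils/Attributes.hpp"

// Prints every attribute name with its (implementation-defined) type name.
void dumpAttrs(const Aidge::Attributes& attrs) {
    for (const std::string& name : attrs.getAttrsName()) {
        std::cout << name << " : " << attrs.getAttrType(name) << std::endl;
    }
}
```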
diff --git a/include/aidge/utils/CParameter.hpp b/include/aidge/utils/CParameter.hpp
deleted file mode 100644
index 7246bc3c7555c12402e864f62416b714052320d7..0000000000000000000000000000000000000000
--- a/include/aidge/utils/CParameter.hpp
+++ /dev/null
@@ -1,102 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPARAMETER_H_
-#define AIDGE_CPARAMETER_H_
-
-#include <map>
-#include <vector>
-#include <string>
-#include <type_traits>
-#include <typeinfo>
-#include <assert.h>
-
-#include "aidge/utils/Any.hpp"
-
-
-namespace Aidge {
-
-///\todo store also a fix-sized code that indicates the type
-///\todo managing complex types or excluding non-trivial, non-aggregate types
-class CParameter {
-private:
-    template<typename _ValueType>
-    inline _ValueType& any_cast_ref(const _any& __any)
-    {
-        using _Up =  std::remove_cv_t<std::remove_reference_t<_ValueType>>;
-        assert(((std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value) && "Template argument must be a reference or CopyConstructible type"));
-        assert((std::is_constructible<_ValueType, const _Up&>::value && "Template argument must be constructible from a const value."));
-        assert(std::is_object<_Up>::value);
-        assert(__any.type() == typeid(_Up));
-        if (_any::Manager<_Up>::access(&__any)) { // assess if _any object is empty
-            return *static_cast<_ValueType*>(_any::Manager<_Up>::access(&__any));
-        }
-        throw std::bad_cast();
-    }
-public:
-    CParameter() : m_Params({}){};
-    ~CParameter() = default;
-
-    /**
-     * \brief Returning a parameter identified by its name
-     * \tparam T expected parameter type
-     * \param i_ParamName Parameter name
-     * \details assert if T is not the actual parameter type, if the parameter does not
-     *  exist or interna parameter position is invalid.
-     * \todo Returning a T const& ? But dangerous => the client may get an address within
-     *  param buffer that will get invalid after the CParam death.
-     * \note at() throws if the parameter does not exist, using find to test for parameter existance
-     */
-    template<class T> T& Get(const std::string i_ParamName)
-    {
-        return any_cast_ref<T>(m_Buffer[m_Params.at(i_ParamName)]);
-    }
-
-    // template<class T> const T& Get(const std::string i_ParamName) const
-    // {
-    //     return any_cast<T>(m_Buffer[m_Params.at(i_ParamName)]);
-    // }
-
-    ///\brief Add a parameter value, identified by its name
-    ///\tparam T expected parameter type
-    ///\param i_ParamName Parameter name
-    ///\param i_Value Parameter value
-    ///\todo Pass i_Value by ref if large or not trivial
-    ///\bug If parameter already exists, its value is changed but written in the
-    /// internal buffer in a new location (previous value is still in memory at its previous location)
-    template<class T> void Add(const std::string &i_ParamName, T&& i_Value)
-    {
-        m_Params[i_ParamName] = m_Buffer.size(); // Copy pointer offset
-        m_Buffer.push_back(_any(std::forward<T>(i_Value)));
-    }
-
-
-    std::string getParamType(std::string const &i_ParamName){
-        return m_Buffer[m_Params.at(i_ParamName)].type().name();
-    }
-
-    std::vector<std::string> getParametersName(){
-        std::vector<std::string> parametersName;
-        for(auto const& it: m_Params)
-            parametersName.push_back(it.first);
-        return parametersName;
-    }
-
-private:
-    std::map<std::string, std::size_t> m_Params; // { Param name : offset }
-
-    ///\brief All raw pointers to parameters values concatenated. Use custom any class compatible with C++14.
-    std::vector<_any> m_Buffer = {};
-};
-
-}
-
-#endif /* AIDGE_CPARAMETER_H_ */
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..60f586edf947cef0e139049814263a29b4d01e24
--- /dev/null
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -0,0 +1,221 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_
+
+#include <map>
+#include <vector>
+#include <type_traits>
+#include <typeinfo>
+#include <cassert>
+#include <string>
+
+#include "aidge/utils/Any.hpp"
+#include "aidge/utils/Attributes.hpp"
+
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/embed.h>
+
+namespace py = pybind11;
+#endif
+
+
+namespace Aidge {
+
+///\todo also store a fixed-size code that indicates the type
+///\todo manage complex types or exclude non-trivial, non-aggregate types
+class DynamicAttributes : public Attributes {
+public:
+    /**
+     * \brief Return an Attribute identified by its name
+     * \tparam T expected Attribute type
+     * \param name Attribute name
+     * \details asserts if T is not the actual Attribute type or if the Attribute does not
+     *  exist
+     * \note at() throws if the Attribute does not exist; use find to test for Attribute existence
+     */
+    template<class T> T& getAttr(const std::string& name)
+    {
+#ifdef PYBIND
+        // If attribute does not exist in C++, it might have been created or modified in Python
+        auto it = mAttrs.find(name);
+        if (it == mAttrs.end()) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy != mAttrsPy.end()) {
+                // Insert the attribute back in C++
+                mAttrs.emplace(std::make_pair(name, libany::any(itPy->second.cast<T>())));
+            }
+        }
+#endif
+
+        return libany::any_cast<T&>(mAttrs.at(name));
+    }
+
+    template<class T> const T& getAttr(const std::string& name) const
+    {
+#ifdef PYBIND
+        // If attribute does not exist in C++, it might have been created or modified in Python
+        auto it = mAttrs.find(name);
+        if (it == mAttrs.end()) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy != mAttrsPy.end()) {
+                // Insert the attribute back in C++
+                mAttrs.emplace(std::make_pair(name, libany::any(itPy->second.cast<T>())));
+            }
+        }
+#endif
+
+        return libany::any_cast<const T&>(mAttrs.at(name));
+    }
+
+    ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template<class T> void addAttr(const std::string& name, const T& value)
+    {
+        const auto& res = mAttrs.emplace(std::make_pair(name, libany::any(value)));
+        assert(res.second && "attribute already exists");
+
+#ifdef PYBIND
+        // We cannot handle Python objects if the Python interpreter is not running
+        if (Py_IsInitialized()) {
+            // Keep a copy of the attribute as a py::object, updated every time it changes
+            mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+        }
+#endif
+    }
+
+    ///\brief Set an Attribute value, identified by its name. If it already exists, its value (and type, if different) is changed.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template<class T> void setAttr(const std::string& name, const T& value)
+    {
+        auto res = mAttrs.emplace(std::make_pair(name, libany::any(value)));
+        if (!res.second)
+            res.first->second = libany::any(value);
+
+#ifdef PYBIND
+        // We cannot handle Python objects if the Python interpreter is not running
+        if (Py_IsInitialized()) {
+            // Keep a copy of the attribute as a py::object, updated every time it changes
+            auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+            if (!resPy.second)
+                resPy.first->second = std::move(py::cast(value));
+        }
+#endif
+    }
+
+    void delAttr(const std::string& name) {
+        mAttrs.erase(name);
+#ifdef PYBIND
+        mAttrsPy.erase(name);
+#endif
+    }
+
+#ifdef PYBIND
+    void addAttrPy(const std::string& name, py::object&& value)
+    {
+        auto it = mAttrs.find(name);
+        assert(it == mAttrs.end() && "attribute already exists");
+
+        const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
+        assert(res.second && "attribute already exists");
+    }
+
+    void setAttrPy(const std::string& name, py::object&& value)
+    {
+        auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
+        if (!resPy.second)
+            resPy.first->second = std::move(value);
+
+        // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
+        mAttrs.erase(name);
+    }
+#endif
+
+    //////////////////////////////////////
+    ///     Generic Attributes API
+    //////////////////////////////////////
+    bool hasAttr(const std::string& name) const override final {
+#ifdef PYBIND
+        // Attributes might have been created in Python, so the second condition is necessary.
+        return (mAttrs.find(name) != mAttrs.end() || mAttrsPy.find(name) != mAttrsPy.end());
+#else
+        return (mAttrs.find(name) != mAttrs.end());
+#endif
+    }
+
+    std::string getAttrType(const std::string& name) const override final {
+        // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
+        // - C-style for C++ created attributes
+        // - Python-style for Python created attributes
+#ifdef PYBIND
+        // If attribute does not exist in C++, it might have been created in Python
+        auto it = mAttrs.find(name);
+        if (it == mAttrs.end()) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy != mAttrsPy.end()) {
+                return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
+            }
+        }
+#endif
+
+        return mAttrs.at(name).type().name();
+    }
+
+    std::set<std::string> getAttrsName() const override final {
+        std::set<std::string> attrsName;
+        for(auto const& it: mAttrs)
+            attrsName.insert(it.first);
+#ifdef PYBIND
+        // Attributes might have been created in Python
+        for(auto const& it: mAttrsPy)
+            attrsName.insert(it.first);
+#endif
+        return attrsName;
+    }
+
+#ifdef PYBIND
+    /**
+     * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a
+ * generic type caster for std::any is not feasible.
+ * The strategy here is to keep a copy of each attribute as a py::object, updated every time it changes.
+    */
+    py::object getAttrPy(const std::string& name) const {
+        return mAttrsPy.at(name);
+    };
+#endif
+
+private:
+#ifdef PYBIND
+    // Stores C++ attributes (copy) and Python-only attributes
+    // Code should be compiled with -fvisibility=hidden
+    // See https://pybind11.readthedocs.io/en/stable/faq.html:
+    // “‘SomeClass’ declared with greater visibility than the type of its 
+    // field ‘SomeClass::member’ [-Wattributes]”
+    // This map will only be populated if Python interpreter is running
+    std::map<std::string, py::object> mAttrsPy;
+    // Stores C++ attributes only
+    // mutable because it may be updated in getAttr() from Python
+    mutable std::map<std::string, libany::any> mAttrs;
+#else
+    std::map<std::string, libany::any> mAttrs;
+#endif
+};
+
+}
+
+#endif /* AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_ */
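
Reviewer note: a minimal C++-side sketch of the `DynamicAttributes` API introduced above (built without `PYBIND`, so only the `mAttrs` map is exercised; attribute names are illustrative):

```cpp
#include <iostream>
#include "aidge/utils/DynamicAttributes.hpp"

int main() {
    Aidge::DynamicAttributes attrs;

    attrs.addAttr<int>("NbEpochs", 10);   // asserts if "NbEpochs" already exists
    attrs.setAttr<float>("Lr", 0.01f);    // creates, or overwrites (type may change)

    attrs.getAttr<int>("NbEpochs") += 5;  // getAttr returns a mutable reference
    std::cout << attrs.getAttr<int>("NbEpochs") << std::endl;  // 15

    attrs.delAttr("Lr");
    std::cout << attrs.hasAttr("Lr") << std::endl;  // 0
    return 0;
}
```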
diff --git a/include/aidge/utils/Parameter.hpp b/include/aidge/utils/Parameter.hpp
deleted file mode 100644
index a475576170915182e25dbaa193ca8a7a3853c0e0..0000000000000000000000000000000000000000
--- a/include/aidge/utils/Parameter.hpp
+++ /dev/null
@@ -1,203 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CORE_UTILS_PARAMETER_H_
-#define AIDGE_CORE_UTILS_PARAMETER_H_
-
-#ifdef PYBIND
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include <string> // Add this inclue to print error
-#endif
-#include <tuple>
-#include <cassert>
-#include <cstddef>
-
-#ifdef PYBIND
-namespace py = pybind11;
-#endif
-
-namespace {
-// This is the type that will hold all the strings. Each enumerate type will
-// declare its own specialization.
-template <typename T> struct EnumStrings {
-    static const char* const data[];
-};
-}
-
-namespace Aidge {
-template<class T, std::size_t N>
-constexpr std::size_t size(T (&)[N]) { return N; }
-
-#ifdef PYBIND
-/* This abstract class allows to avoid binding Parametrizable.
-*  Otherwise we would need to bind every template possible of Parametrizable.
-*  Every operators can access the methods of this class by inheriting from
-*  PyAbstractParametrizable in the binding code.
-*/
-class PyAbstractParametrizable{
-    public:
-        /* Bindable get function, does not recquire any templating.
-        *  This is thanks to py::object which allow the function to
-        *  be agnostic from its return type.
-        */
-        virtual py::object getPy(const char* /*name*/) = 0;
-};
-#endif
-
-template <class PARAM_ENUM, class ...T>
-class Parameterizable
-#ifdef PYBIND
-    : public PyAbstractParametrizable
-#endif
-    {
-public:
-    using Parameters = std::tuple<T...>;
-
-    // Helper class to pass to the constructor
-    template <PARAM_ENUM paramEnum>
-    class param {
-    public:
-        constexpr param(const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& v) : value(v) {}
-        const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type value;
-    };
-
-/*
-    // Direct tuple initialization
-    Parameterizable(T... params) : mParams({params...}) {
-
-    }
-*/
-
-    // Constructor for parameters initialization.
-    // Compile-time garantee that every parameter is initialized.
-    template <PARAM_ENUM ...paramEnum> // non-type parameter pack
-    constexpr Parameterizable(const param<paramEnum>&&... params) {
-        // Check number of params consistency
-        static_assert(sizeof...(params) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in constructor");
-        // static_assert(size(EnumStrings<PARAM_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in enum string");
-
-        // Check no duplicates
-        constexpr std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { paramEnum... };
-        static_assert(!hasDuplicates(pe), "duplicate parameter"); // requires C++14
-
-        // Init params with constructor arguments
-        const std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(get<paramEnum>() = params.value), paramEnum) ... };
-        (void)p; // avoid unused warning
-    }
-
-    Parameterizable(const Parameterizable& params):
-        mParams(params.mParams)
-    {
-        // cpy-ctor (required for Operator cpy-ctor)
-    }
-
-    // Compile-time access with enum
-    template <PARAM_ENUM paramEnum>
-    constexpr typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() {
-        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
-    }
-
-    template <PARAM_ENUM paramEnum>
-    constexpr const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() const {
-        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
-    }
-
-    // Runtime access with enum
-    template <typename R>
-    constexpr R& get(PARAM_ENUM paramEnum) {
-        return get<R>(static_cast<std::size_t>(paramEnum));
-    }
-
-    template <typename R>
-    constexpr const R& get(PARAM_ENUM paramEnum) const {
-        return get<R>(static_cast<std::size_t>(paramEnum));
-    }
-
-    // Runtime existance check with name
-    constexpr bool isParam(const char* name) const {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    // Runtime access with name
-    template <typename R>
-    constexpr R& get(const char* name) {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                return get<R>(i);
-            }
-        }
-
-        assert(false && "parameter not found");
-    }
-
-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE > 0), R&>::type get(std::size_t i) {
-        if (i == SIZE) {
-            if (std::is_same<R, typename std::tuple_element<SIZE,std::tuple<T...>>::type>::value) {
-                return reinterpret_cast<R&>(std::get<SIZE>(mParams));
-            }
-            else {
-                assert(false && "wrong parameter type");
-            }
-        }
-        else {
-            return get<R, SIZE-1>(i);
-        }
-    }
-
-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE <= 0), R&>::type get(std::size_t i) {
-        assert(false && "parameter not found");
-    }
-
-    constexpr const std::tuple<T...>& getParams() const {
-        return mParams;
-    }
-
-    #ifdef PYBIND
-    py::object getPy(const char* name){
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
-                // Normal accessor would not work has we convert the tuple to a py::object which can be anything
-                return py::detail::accessor_policies::tuple_item::get(py::cast(mParams), static_cast<py::size_t>(i));
-            }
-        }
-        throw py::value_error("Parameter : " + std::string(name) + " does not exist." );
-    };
-    #endif
-
-private:
-    template <typename V, std::size_t N>
-    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
-        for (std::size_t i = 1; i < N; i++) {
-            for (std::size_t j = 0; j < i; j++) {
-                if (array[i] == array[j]) {
-                    return true;
-                }
-            }
-        }
-
-        return false;
-    }
-
-    std::tuple<T...> mParams;
-};
-}
-
-#endif /* AIDGE_CORE_UTILS_PARAMETER_H_ */
diff --git a/include/aidge/utils/Recipies.hpp b/include/aidge/utils/Recipies.hpp
index 4cbf8fd284bef314dbe28b19ebdae05172467bad..894e56fae2e9c2f6bcf11e4e76a433f5c8058080 100644
--- a/include/aidge/utils/Recipies.hpp
+++ b/include/aidge/utils/Recipies.hpp
@@ -17,11 +17,54 @@
 
 namespace Aidge{
 
+// FUSE MATMUL + ADD -> FC
+
+/**
+ * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Nodes into a :cpp:function:`Aidge::FC` Node.
+ *
+ * @param nodes Strict set of Node to merge.
+ */
 void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
+/**
+ * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Nodes into a :cpp:function:`Aidge::FC` Node.
+ *
+ * @param graphView Graph view on which to run graph matching in order to apply the transformation.
+ */
+void fuseMulAdd(std::shared_ptr<GraphView> graphView);
+
+
+// REMOVE FLATTEN + FC -> FC
+
+/**
+ * @brief Remove ``Flatten`` before :cpp:function:`Aidge::FC` Node.
+ *
+ * @param nodes Strict set of Node among which the ``Flatten`` Node is removed.
+ */
 void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
+/**
+ * @brief Remove ``Flatten`` before :cpp:function:`Aidge::FC` Node.
+ *
+ * @param graphView Graph view on which to run graph matching in order to apply the transformation.
+ */
+void removeFlatten(std::shared_ptr<GraphView> graphView);
+
+// FUSE BN + FC || CONV -> FC || CONV
 
+/**
+ * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
+ * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
+ *
+ * @param nodes Strict set of Node to merge.
+ */
+void fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes);
+/**
+ * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
+ * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
+ *
+ * @param graphView Graph view on which to run graph matching in order to apply the transformation.
+ */
+void fuseBatchNorm(std::shared_ptr<GraphView> graphView);
 
 }
 
-
-#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
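
The new `GraphView` overloads declared above make each recipe applicable in a single call, without hand-picking the matched nodes. A minimal sketch of how they compose (assuming `model` is a `std::shared_ptr<GraphView>` obtained elsewhere; this helper is illustrative and not part of the patch):

```cpp
#include <memory>

#include "aidge/graph/GraphView.hpp"
#include "aidge/utils/Recipies.hpp"

// Run the three recipes declared in Recipies.hpp on a whole graph.
// Each GraphView overload performs graph matching internally and applies
// the transformation to every match it finds.
void simplify(std::shared_ptr<Aidge::GraphView> model) {
    Aidge::fuseMulAdd(model);     // MatMul + Add      -> FC
    Aidge::removeFlatten(model);  // Flatten before FC -> removed
    Aidge::fuseBatchNorm(model);  // Conv + BatchNorm  -> Conv
}
```
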
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 98749c1349bad644dee2c1a8549559939791f71c..de543e95a16475c4443164af7be5c379d6554f8d 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -34,6 +34,7 @@ public:
     static std::map<Key, std::function<Func>>& registry()
     {
         #ifdef PYBIND
+        #define _CRT_SECURE_NO_WARNINGS // note: only effective if defined before the CRT headers are included
         if (std::getenv("AIDGE_CORE_WITH_PYBIND")){
             std::string name = std::string("registrar_")+typeid(Registrable<DerivedClass, Key, Func>).name();
             static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fb800cffbcff5d4113961f8e62977417336f2cb8
--- /dev/null
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -0,0 +1,206 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
+
+#include <tuple>
+#include <array>    // std::array (used by the constructor checks)
+#include <cassert>
+#include <cstddef>
+#include <cstring>  // strcmp (used for runtime access by name)
+#include <typeinfo>
+
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Utils.hpp"
+
+namespace Aidge {
+/**
+ * @brief This class is designed to handle static attributes (i.e. known at compile-time)
+ * with named accessors and minimal overhead: the name strings are not stored in each object
+ * instance, and attributes remain accessible at compile-time with no runtime cost.
+ */
+template <class ATTRS_ENUM, class ...T>
+class StaticAttributes : public Attributes {
+public:
+    using Attrs = std::tuple<T...>;
+
+    // Helper class to pass to the constructor
+    template <ATTRS_ENUM attrsEnum>
+    class attr {
+    public:
+        constexpr attr(const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& v) : value(v) {}
+        const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type value;
+    };
+
+/*
+    // Direct tuple initialization
+    StaticAttributes(T... attrs) : mAttrs({attrs...}) {
+
+    }
+*/
+
+    // Constructor for attributes initialization.
+    // Compile-time guarantee that every attribute is initialized.
+    template <ATTRS_ENUM ...attrsEnum> // non-type template parameter pack
+    constexpr StaticAttributes(const attr<attrsEnum>&&... attrs) {
+        // Check that the number of attrs is consistent
+        static_assert(sizeof...(attrs) == std::tuple_size<std::tuple<T...>>::value, "wrong number of attributes in constructor");
+        // static_assert(size(EnumStrings<ATTRS_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of attributes in enum string");
+
+        // Check no duplicates
+        constexpr std::array<ATTRS_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { attrsEnum... };
+        static_assert(!hasDuplicates(pe), "duplicate attribute"); // requires C++14
+
+        // Init attrs with constructor arguments
+        const std::array<ATTRS_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(getAttr<attrsEnum>() = attrs.value), attrsEnum) ... };
+        (void)p; // avoid unused warning
+    }
+
+    // Compile-time access with enum
+    template <ATTRS_ENUM attrsEnum>
+    constexpr typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& getAttr() {
+        return std::get<static_cast<std::size_t>(attrsEnum)>(mAttrs);
+    }
+
+    template <ATTRS_ENUM attrsEnum>
+    constexpr const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& getAttr() const {
+        return std::get<static_cast<std::size_t>(attrsEnum)>(mAttrs);
+    }
+
+    // Runtime access with enum
+    template <typename R>
+    constexpr R& getAttr(ATTRS_ENUM attrsEnum) {
+        return getAttr<R>(static_cast<std::size_t>(attrsEnum));
+    }
+
+    template <typename R>
+    constexpr const R& getAttr(ATTRS_ENUM attrsEnum) const {
+        return getAttr<R>(static_cast<std::size_t>(attrsEnum));
+    }
+
+    // Runtime access with name
+    template <typename R>
+    constexpr R& getAttr(const char* name) {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) {
+                return getAttr<R>(i);
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name);
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    constexpr typename std::enable_if<(SIZE > 0), R&>::type getAttr(std::size_t i) {
+        if (i == SIZE-1) {
+            if (std::is_same<R, typename std::tuple_element<SIZE-1,std::tuple<T...>>::type>::value) {
+                return reinterpret_cast<R&>(std::get<SIZE-1>(mAttrs));
+            }
+            else {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "wrong type for attribute with index %zu", i);
+            }
+        }
+        else {
+            return getAttr<R, SIZE-1>(i);
+        }
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    [[noreturn]] constexpr typename std::enable_if<(SIZE == 0), R&>::type getAttr(std::size_t /*i*/) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
+    }
+
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type getAttrType(std::size_t i) const {
+        if (i == SIZE-1) {
+            return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
+        }
+        else {
+            return getAttrType<SIZE-1>(i);
+        }
+    }
+
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    [[noreturn]] constexpr typename std::enable_if<(SIZE == 0), const std::type_info&>::type getAttrType(std::size_t /*i*/) const {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
+    }
+
+    constexpr const std::tuple<T...>& getStaticAttributes() const {
+        return mAttrs;
+    }
+
+    //////////////////////////////////////
+    ///     Generic Attributes API
+    //////////////////////////////////////
+    // Runtime existence check with name
+    bool hasAttr(const std::string& name) const override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    // Runtime type access with name
+    std::string getAttrType(const std::string& name) const override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return getAttrType(i).name();
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name.c_str());
+    }
+
+    std::set<std::string> getAttrsName() const override final {
+        std::set<std::string> attrsName;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
+        }
+        return attrsName;
+    }
+
+    #ifdef PYBIND
+    py::object getAttrPy(const std::string& name) const {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+                // A normal accessor would not work, as we convert the tuple to a py::object which can be anything
+                return py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
+    };
+    #endif
+
+private:
+    template <typename V, std::size_t N>
+    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
+        for (std::size_t i = 1; i < N; i++) {
+            for (std::size_t j = 0; j < i; j++) {
+                if (array[i] == array[j]) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    std::tuple<T...> mAttrs;
+};
+}
+
+#endif /* AIDGE_CORE_UTILS_STATICATTRIBUTES_H_ */
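
To make the API above concrete, here is a hypothetical attribute set built on `StaticAttributes`. The `ScaleAttr` enum and its names are illustrative only, and the `EnumStrings<>` member specialization is assumed to follow the convention used for the built-in operators:

```cpp
#include "aidge/utils/StaticAttributes.hpp"

enum class ScaleAttr { Factor, Rounds };

// Assumed convention: register human-readable names for runtime lookup by name.
template <>
const char* const EnumStrings<ScaleAttr>::data[] = {"Factor", "Rounds"};

using ScaleAttrs = Aidge::StaticAttributes<ScaleAttr, float, int>;

void demo() {
    // Every attribute must be initialized; duplicates are rejected at compile time.
    ScaleAttrs attrs(ScaleAttrs::attr<ScaleAttr::Factor>(0.5f),
                     ScaleAttrs::attr<ScaleAttr::Rounds>(3));

    float f = attrs.getAttr<ScaleAttr::Factor>(); // compile-time access, no lookup cost
    int   r = attrs.getAttr<int>("Rounds");       // runtime access by name
    (void)f; (void)r;
}
```
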
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6387619546c66922e48cf95a8a56487d4b0d0641
--- /dev/null
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -0,0 +1,53 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_TENSOR_UTILS_H_
+#define AIDGE_CORE_UTILS_TENSOR_UTILS_H_
+#include <cassert> // assert
+#include <cmath>   // std::abs
+#include "aidge/data/Tensor.hpp"
+
+/**
+ * @brief Compare two :cpp:class:`Aidge::Tensor` element-wise. The comparison criterion is:
+ *
+ * |t1-t2| <= absolute + relative * |t2|
+ *
+ * If any pair of elements differs by more than the allowed tolerance, return false.
+ * If the tensors do not have the same size, return false.
+ * If the tensors do not have the same datatype, return false.
+ * If the templated type does not correspond to the datatype of the tensors, raise an assertion error.
+ *
+ * @tparam T should correspond to the datatype of the tensors; it also defines the type of the absolute and relative errors
+ * @param t1  first :cpp:class:`Aidge::Tensor` to test
+ * @param t2  second :cpp:class:`Aidge::Tensor` to test
+ * @param relative relative difference allowed (should be between 0 and 1)
+ * @param absolute absolute error allowed (should be positive)
+ * @return true if both tensors are approximately equal and have the same datatype and shape, false otherwise
+ */
+template <typename T>
+bool approxEq(Aidge::Tensor t1, Aidge::Tensor t2, float relative, float absolute){
+    assert(t1.dataType() == t2.dataType());
+    assert(t1.dataType() == Aidge::NativeType<T>::type);
+    assert(relative >= 0 && relative <= 1);
+    assert(absolute >= 0);
+
+    if (t1.size() != t2.size()){
+        return false;
+    }
+    for (std::size_t i = 0; i < t1.size(); ++i){
+        if (static_cast<float>(std::abs(t1.get<T>(i) - t2.get<T>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T>(i)))))){
+            return false;
+        }
+    }
+    return true;
+}
+
+#endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_ */
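
A standalone numeric illustration of the criterion, with hypothetical values and independent of the Tensor class:

```cpp
#include <cassert>
#include <cmath>

int main() {
    const float v1 = 1.049f, v2 = 1.0f;
    const float relative = 0.01f, absolute = 0.05f;

    // |1.049 - 1.0| = 0.049 <= 0.05 + 0.01 * |1.0| = 0.06 -> approximately equal
    assert(std::abs(v1 - v2) <= absolute + relative * std::abs(v2));
    return 0;
}
```
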
diff --git a/include/aidge/utils/Utils.hpp b/include/aidge/utils/Utils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7c0c03c82ff252b6175d3c9bbe5395bb05127c9f
--- /dev/null
+++ b/include/aidge/utils/Utils.hpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef AIDGE_UTILS_H_
+#define AIDGE_UTILS_H_
+
+#include <cstdio>
+#include <cstdlib> // std::abort
+#include <memory>  // std::unique_ptr
+
+#ifdef NO_EXCEPTIONS
+#define AIDGE_THROW_OR_ABORT(ex, ...) \
+do { std::printf(__VA_ARGS__); std::abort(); } while (false)
+#else
+#include <stdexcept>
+#define AIDGE_THROW_OR_ABORT(ex, ...) \
+do { \
+    int n = 128; \
+    std::unique_ptr<char[]> formatted; \
+    formatted.reset(new char[n]); \
+    const int len = std::snprintf(formatted.get(), n, __VA_ARGS__); \
+    if (len >= n) { \
+        formatted.reset(new char[len + 1]); \
+        std::snprintf(formatted.get(), len + 1, __VA_ARGS__); \
+    } \
+    throw ex(formatted.get()); \
+} while (false)
+#endif
+
+#endif //AIDGE_UTILS_H_
\ No newline at end of file
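
Typical use of the macro, mirroring its call sites in `StaticAttributes.hpp`: the printf-style message is formatted into a dynamically sized buffer, then thrown as the requested exception type, or printed before `std::abort()` when `NO_EXCEPTIONS` is defined. A small sketch (the `checkIndex` helper is hypothetical):

```cpp
#include <cstddef>
#include <stdexcept>

#include "aidge/utils/Utils.hpp"

void checkIndex(std::size_t i, std::size_t size) {
    if (i >= size) {
        // Throws std::runtime_error("index ... out of range ...") when
        // exceptions are enabled, otherwise prints the message and aborts.
        AIDGE_THROW_OR_ABORT(std::runtime_error,
                             "index %zu out of range (size %zu)", i, size);
    }
}
```
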
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index d6442723ecc79527e8eaa7d3e03a466c085dfa58..31470e0eb2c50b5386b64498f89419801b133d3a 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -26,10 +26,10 @@ namespace Aidge {
 
 template<typename T>
 void addCtor(py::class_<Tensor,
-                        std::shared_ptr<Tensor>, 
-                        Data, 
+                        std::shared_ptr<Tensor>,
+                        Data,
                         Registrable<Tensor,
-                                    std::tuple<std::string, DataType>, 
+                                    std::tuple<std::string, DataType>,
                                     std::unique_ptr<TensorImpl>(const Tensor&)>>& mTensor){
     mTensor.def(py::init([]( py::array_t<T, py::array::c_style | py::array::forcecast> b) {
         /* Request a buffer descriptor from Python */
@@ -46,24 +46,27 @@ void addCtor(py::class_<Tensor,
         }else{
             printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n");
         }
-        
+
         return newTensor;
-    }));
+    }))
+    .def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
+    .def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
+    ;
 }
 
 
 void init_Tensor(py::module& m){
     py::class_<Registrable<Tensor,
-                           std::tuple<std::string, DataType>, 
+                           std::tuple<std::string, DataType>,
                            std::unique_ptr<TensorImpl>(const Tensor&)>,
                std::shared_ptr<Registrable<Tensor,
-                                           std::tuple<std::string, DataType>, 
+                                           std::tuple<std::string, DataType>,
                                            std::unique_ptr<TensorImpl>(const Tensor&)>>>(m,"TensorRegistrable");
 
-    py::class_<Tensor, std::shared_ptr<Tensor>, 
-               Data, 
+    py::class_<Tensor, std::shared_ptr<Tensor>,
+               Data,
                Registrable<Tensor,
-                           std::tuple<std::string, DataType>, 
+                           std::tuple<std::string, DataType>,
                            std::unique_ptr<TensorImpl>(const Tensor&)>> pyClassTensor
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
@@ -74,6 +77,8 @@ void init_Tensor(py::module& m){
     .def("size", &Tensor::size)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) &Tensor::resize)
     .def("has_impl", &Tensor::hasImpl)
+    .def("get_coord", &Tensor::getCoord)
+    .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("__str__", [](Tensor& b) {
         return b.toString();
@@ -82,15 +87,27 @@ void init_Tensor(py::module& m){
         return b.size();
     })
     .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
-        // TODO : Should return error if backend not compatible with get
         if (idx >= b.size()) throw py::index_error();
         switch(b.dataType()){
             case DataType::Float64:
-                return py::cast(static_cast<double*>(b.getImpl()->rawPtr())[idx]);
+                return py::cast(b.get<double>(idx));
+            case DataType::Float32:
+                return py::cast(b.get<float>(idx));
+            case DataType::Int32:
+                return py::cast(b.get<int>(idx));
+            default:
+                return py::none();
+        }
+    })
+    .def("__getitem__", [](Tensor& b, std::vector<size_t> coordIdx)-> py::object {
+        if (b.getIdx(coordIdx) >= b.size()) throw py::index_error();
+        switch(b.dataType()){
+            case DataType::Float64:
+                return py::cast(b.get<double>(coordIdx));
             case DataType::Float32:
-                return py::cast(static_cast<float*>(b.getImpl()->rawPtr())[idx]);
+                return py::cast(b.get<float>(coordIdx));
             case DataType::Int32:
-                return py::cast(static_cast<int*>(b.getImpl()->rawPtr())[idx]);
+                return py::cast(b.get<int>(coordIdx));
             default:
                 return py::none();
         }
@@ -126,12 +143,12 @@ void init_Tensor(py::module& m){
         }
 
         return py::buffer_info(
-            tensorImpl->rawPtr(),                       /* Pointer to buffer */
-            tensorImpl->scalarSize(),                   /* Size of one scalar */
-            dataFormatDescriptor,                /* Python struct-style format descriptor */
-            b.nbDims(),                                 /* Number of dimensions */
-            dims,                                       /* Buffer dimensions */
-            strides                                     /* Strides (in bytes) for each index */
+            tensorImpl->rawPtr(),       /* Pointer to buffer */
+            tensorImpl->scalarSize(),   /* Size of one scalar */
+            dataFormatDescriptor,       /* Python struct-style format descriptor */
+            b.nbDims(),                 /* Number of dimensions */
+            dims,                       /* Buffer dimensions */
+            strides                     /* Strides (in bytes) for each index */
         );
     });
 
@@ -142,6 +159,6 @@ void init_Tensor(py::module& m){
 // #if SIZE_MAX != 0xFFFFFFFF
     addCtor<double>(pyClassTensor);
 // #endif
-    
+
 }
 }
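
The bindings added above expose both flat-index and coordinate addressing: `get_idx` converts coordinates to a flat index and `get_coord` performs the inverse. A sketch of the equivalence in terms of the underlying C++ API (assumes `t` already has a registered backend implementation, e.g. from `aidge_backend_cpu`):

```cpp
#include <cstddef>

#include "aidge/data/Tensor.hpp"

void indexingDemo(Aidge::Tensor& t) {          // e.g. a 2x3 float tensor
    t.set<float>({1, 2}, 5.0f);                // coordinate access
    const std::size_t flat = t.getIdx({1, 2}); // flat index of coordinate (1, 2)
    float v = t.get<float>(flat);              // same element via the flat index
    (void)v;                                   // v == 5.0f
}
```
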
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 3efcf7c5345bbc835aeaf6dcbc416769b8654439..ab8b4cf7b91d5eea2db5245a8c5122ab004b4766 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -12,7 +12,6 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Add.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index ecbb743d33cc5750bc60aeed8e5207dcec0c23dc..372afebdd3e1626cd0af88e335b78ec7fd73a5f4 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -27,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, Attributes>(
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 70d9bce003033e1264ac39764271773fa84c760f..f43381fecc689a292e166c4da40ea0cb4842c9e6 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -14,7 +14,6 @@
 
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
@@ -22,7 +21,7 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, PyAbstractParametrizable>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 7e366305f287e958ea7500695c1f3285908017b1..0c09917d71e520227eed48705527adaf204857ee 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -26,7 +25,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, Attributes>(
     m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 8a81e7ba184536cbd535db24519495400bce6fdb..3f48c50f7ffdb44450c0e2a155d85dcbf9f73fd9 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -27,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, Attributes>(
     m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 3b4137c6f208f96d256c72300437cc978658b84f..4b9d61d082ebed4d426b41efa071d3943f83d231 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -12,7 +12,6 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/FC.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
@@ -21,7 +20,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, PyAbstractParametrizable>(m, "FC_Op", py::multiple_inheritance());
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FC_Op", py::multiple_inheritance());
 
   m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index bec59eaf2cecdc7f64d1da07580116c4b3334992..4cf4dae2234900722058d6555582c5b78900ab7d 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -11,6 +11,7 @@
 
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
+#include <pybind11/functional.h>
 #include <stdio.h>
 
 #include "aidge/backend/OperatorImpl.hpp"
@@ -20,46 +21,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator, DynamicAttributes>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
-    .def("get_parameter_type", &GenericOperator_Op::getParameterType)
-    .def("get_parameters_name", &GenericOperator_Op::getParametersName)
-    .def("add_parameter", &GenericOperator_Op::addParameter<bool>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<int>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<float>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::string>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<bool>>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<int>>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<float>>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<std::string>>)
-    .def("get_parameter", [](GenericOperator_Op& self, std::string key) -> py::object {
-        /*
-        This getParameter method returns the good python type without having to have
-        prior knowledge of the parameter type.
-        */
-        py::object res = py::none();
-        std::string paramType = self.getParameterType(key);
-        if(paramType == typeid(int).name())
-            res = py::cast(self.getParameter<int>(key));
-        else if(paramType == typeid(float).name())
-            res = py::cast(self.getParameter<float>(key));
-        else if(paramType == typeid(bool).name())
-            res = py::cast(self.getParameter<bool>(key));
-        else if(paramType == typeid(std::string).name())
-            res = py::cast(self.getParameter<std::string>(key));
-        else if(paramType == typeid(std::vector<bool>).name())
-            res = py::cast(self.getParameter<std::vector<bool>>(key));
-        else if(paramType == typeid(std::vector<int>).name())
-            res = py::cast(self.getParameter<std::vector<int>>(key));
-        else if(paramType == typeid(std::vector<float>).name())
-            res = py::cast(self.getParameter<std::vector<float>>(key));
-        else if(paramType == typeid(std::vector<std::string>).name())
-            res = py::cast(self.getParameter<std::vector<std::string>>(key));
-        else {
-            throw py::key_error("Failed to convert parameter type " + key + ", this issue may come from typeid function which gave an unknown key : [" + paramType + "]. Please open an issue asking to add the support for this key.");
-        }
-        return res;
-    });
+    .def_readonly_static("identity", &GenericOperator_Op::Identity)
+    .def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
+    .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
 
     m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nbDataIn"), py::arg("nbIn"), py::arg("nbOut"),
           py::arg("name") = "");
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index c062d93f5c40fe46336fe34f6d1664f24da07732..cae8a88bab7b59189dfbc6528cd653f1c97cb73a 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -13,13 +13,12 @@
 
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, PyAbstractParametrizable>(m, "LeakyReLU_Op", py::multiple_inheritance());
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLU_Op", py::multiple_inheritance());
 
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index b6ae27289fabe1fe4dbeea60704a61373bc850cf..2f738550041bcdb1ae809d68fa24fdf5a72e9164 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -11,8 +11,7 @@
 
 #include <pybind11/pybind11.h>
 
-#include "aidge/operator/Matmul.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/operator/MatMul.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
@@ -20,13 +19,13 @@
 namespace py = pybind11;
 namespace Aidge {
 
-void declare_Matmul(py::module &m) {
-  py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, PyAbstractParametrizable>(m, "Matmul_Op", py::multiple_inheritance());
+void declare_MatMul(py::module &m) {
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, Attributes>(m, "MatMul_Op", py::multiple_inheritance());
 
-  m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = "");
+  m.def("MatMul", &MatMul, py::arg("out_channels"), py::arg("name") = "");
 }
 
-void init_Matmul(py::module &m) {
-  declare_Matmul(m);
+void init_MatMul(py::module &m) {
+  declare_MatMul(m);
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 9bd951c446e080ff27b099527ac9bbc350646140..2efd18c816c2d588e574872b3d3776a3409dc4ba 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -27,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, Attributes>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index ea9880800059e8993996e67138f89419c165fc4f..1c62cd0adf6b8712073ec0674754ce7c8c2014a5 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -13,7 +13,6 @@
 #include <pybind11/stl.h>
 
 #include "aidge/utils/Types.h"
-#include "aidge/utils/Parameter.hpp"
 // #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -26,18 +25,19 @@ template <DimIdx_t DIM>
 void declare_Producer(py::module &m) {
     // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
-    
+
 }
 
 
 void init_Producer(py::module &m) {
     py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Operator>(
-        m, 
-        "ProducerOp", 
+        m,
+        "ProducerOp",
         py::multiple_inheritance())
-    .def("dims", &Producer_Op::dims);
+    .def("dims", &Producer_Op::dims)
+    .def("set_output_tensor", &Producer_Op::setOutputTensor);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
-    
+
     declare_Producer<1>(m);
     declare_Producer<2>(m);
     declare_Producer<3>(m);
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 78418d51a5c410cb56bb8421fd7f3dc6ec6d32db..d1287c0a928ae2ad27a839cec1c3d3955da65538 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -17,7 +17,7 @@ namespace Aidge {
 void init_Data(py::module&);
 void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
-void init_Parameterizable(py::module&);
+void init_Attributes(py::module&);
 void init_Operator(py::module&);
 
 void init_Add(py::module&);
@@ -28,7 +28,7 @@ void init_ConvDepthWise(py::module&);
 void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
-void init_Matmul(py::module&);
+void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
@@ -46,7 +46,7 @@ void init_GRegex(py::module&);
 void init_Recipies(py::module&);
 
 void init_Scheduler(py::module&);
-
+void init_TensorUtils(py::module&);
 
 void set_python_flag(){
     // Set an env variable to know if we run with python or cpp
@@ -65,7 +65,7 @@ void init_Aidge(py::module& m){
     init_Connector(m);
 
     init_OperatorImpl(m);
-    init_Parameterizable(m);
+    init_Attributes(m);
     init_Operator(m);
     init_Add(m);
     init_AvgPooling(m);
@@ -75,7 +75,7 @@ void init_Aidge(py::module& m){
     init_FC(m);
     init_GenericOperator(m);
     init_LeakyReLU(m);
-    init_Matmul(m);
+    init_MatMul(m);
     init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
@@ -86,6 +86,7 @@ void init_Aidge(py::module& m){
     init_GRegex(m);
     init_Recipies(m);
     init_Scheduler(m);
+    init_TensorUtils(m);
 }
 
 PYBIND11_MODULE(aidge_core, m) {
diff --git a/python_binding/recipies/pybind_Recipies.cpp b/python_binding/recipies/pybind_Recipies.cpp
index b4147dcb4fb82dbfe9f5b4605604725c6945ece9..93c131ef7417135bfdbc657c5c809339430616ed 100644
--- a/python_binding/recipies/pybind_Recipies.cpp
+++ b/python_binding/recipies/pybind_Recipies.cpp
@@ -20,24 +20,45 @@
 
 namespace Aidge {
 void init_Recipies(py::module &m) {
-  m.def("fuse_mul_add", &fuseMulAdd, py::arg("nodes"), R"mydelimiter(
-    Recipie to Fuse MatMul and Add operators into an `aidge.FC` operator.
-    
-    Parameters
-    ----------
+
+
+  m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
+    Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
+  m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+    Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+
     :param nodes: The MatMul and Add nodes to fuse.
-    :type nodes: list of `aidge.node`
+    :type nodes: list of :py:class:`aidge_core.Node`
+    )mydelimiter");
+
+  m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
+    Recipe to remove a Flatten operator.
 
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
-  m.def("remove_flatten", &removeFlatten, py::arg("nodes"), R"mydelimiter(
+  m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
     Recipe to remove a Flatten operator.
-    
-    Parameters
-    ----------
+
     :param nodes: The flatten operator to remove.
-    :type nodes: list of `aidge.node`
+    :type nodes: list of :py:class:`aidge_core.Node`
+    )mydelimiter");
+  m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+    Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
+  m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
+    Recipe to fuse BatchNorm with Conv or FC operators.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
+  m.def("fuse_batchnorm", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseBatchNorm), py::arg("nodes"), R"mydelimiter(
+    Recipe to fuse BatchNorm with Conv or FC operators.
+
+    :param nodes: The BatchNorm and Conv (or FC) nodes to fuse.
+    :type nodes: list of :py:class:`aidge_core.Node`
     )mydelimiter");
-  
 }
 } // namespace Aidge
diff --git a/python_binding/utils/pybind_Parameter.cpp b/python_binding/utils/pybind_Parameter.cpp
index 358316ea00413813d6d482a8a4601e69af3aa992..2957876f31ad0781a36905cef3a5ae88934b6a8a 100644
--- a/python_binding/utils/pybind_Parameter.cpp
+++ b/python_binding/utils/pybind_Parameter.cpp
@@ -1,12 +1,36 @@
 #include <pybind11/pybind11.h>
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
-void init_Parameterizable(py::module& m){
-    py::class_<PyAbstractParametrizable, std::shared_ptr<PyAbstractParametrizable>>(m, "PyAbstractParametrizable")
-    .def("get", &PyAbstractParametrizable::getPy, py::arg("name"))
-    ;
+DynamicAttributes test_DynamicAttributes_binding() {
+    DynamicAttributes attrs;
+    attrs.addAttr<int>("a", 42);
+    attrs.addAttr<std::string>("b", "test");
+    attrs.addAttr<std::vector<bool>>("c", {true, false, true});
+    return attrs;
 }
+
+double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
+    return attrs.getAttr<double>("d");
+}
+
+void init_Attributes(py::module& m){
+    py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
+    .def("has_attr", &Attributes::hasAttr, py::arg("name"))
+    .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
+    .def("get_attrs_name", &Attributes::getAttrsName)
+    .def("get_attr", &Attributes::getAttrPy, py::arg("name"));
+
+    py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
+    .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
+    .def("set_attr", &DynamicAttributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));
+
+    m.def("test_DynamicAttributes_binding", &test_DynamicAttributes_binding);
+    m.def("test_DynamicAttributes_binding_check", &test_DynamicAttributes_binding_check, py::arg("attrs"));
+}
+
 }
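
For reference, the C++ side of the `DynamicAttributes` API exercised by the test helpers above looks like this (a minimal sketch using only the methods bound or called in this file):

```cpp
#include "aidge/utils/DynamicAttributes.hpp"

void dynamicAttrsDemo() {
    Aidge::DynamicAttributes attrs;
    attrs.addAttr<int>("a", 42);

    if (attrs.hasAttr("a")) {             // generic Attributes API
        int a = attrs.getAttr<int>("a");  // typed access, as in the check helper
        (void)a;
    }
    attrs.delAttr("a");
}
```
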
 
diff --git a/python_binding/utils/pybind_TensorUtils.cpp b/python_binding/utils/pybind_TensorUtils.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..78825a5f3b8d45f22f76c57bd780dc7019fbc123
--- /dev/null
+++ b/python_binding/utils/pybind_TensorUtils.cpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+
+#include "aidge/utils/TensorUtils.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+template<typename T>
+void addTensorUtilsFunction(py::module &m){
+    m.def("approx_eq",
+    &approxEq<T>,
+    py::arg("t1"),
+    py::arg("t2"),
+    py::arg("relative"),
+    py::arg("absolute"),
+    R"mydelimiter(
+        Compare two :cpp:class:`Aidge::Tensor` element-wise. The comparison criterion is:
+            |t1-t2| <= absolute + relative * |t2|
+
+        If any pair of elements differs by more than the allowed tolerance, return False.
+        If the tensors do not have the same size, return False.
+        If the tensors do not have the same datatype, return False.
+        If the templated type does not correspond to the datatype of the tensors, raise an assertion error.
+
+        :param t1: first tensor to test
+        :type t1: :py:class:`aidge_core.Tensor`
+        :param t2: second tensor to test
+        :type t2: :py:class:`aidge_core.Tensor`
+        :param relative: relative difference allowed (should be between 0 and 1)
+        :type relative: float
+        :param absolute: absolute error allowed (should be positive)
+        :type absolute: float
+        )mydelimiter");
+}
+
+void init_TensorUtils(py::module &m) {
+    addTensorUtilsFunction<float>(m);
+    addTensorUtilsFunction<double>(m);
+    addTensorUtilsFunction<int>(m);
+    addTensorUtilsFunction<long>(m);
+}
+} // namespace Aidge
diff --git a/setup.py b/setup.py
index 0b0f66e9132d66cdb6385d7f8c6c69ae0cc5d0e3..16305afdfdfa5de2e328460d9e96c77eb96a9d98 100644
--- a/setup.py
+++ b/setup.py
@@ -62,11 +62,11 @@ class CMakeBuild(build_ext):
 
         os.chdir(str(build_temp))
 
-        # Impose to use the executable of the python 
+        # Force the use of the Python executable
         # used to launch setup.py to setup PythonInterp
         param_py = "-DPYTHON_EXECUTABLE=" + sys.executable
-        
-        install_path = f"{build_temp}/install" if "AIDGE_INSTALL" not in os.environ else os.environ["AIDGE_INSTALL"]
+
+        install_path = os.path.join(sys.prefix, "lib", "libAidge") if "AIDGE_INSTALL" not in os.environ else os.environ["AIDGE_INSTALL"]
 
         self.spawn(['cmake', str(cwd), param_py, '-DTEST=OFF', f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}'])
         if not self.dry_run:
@@ -83,11 +83,11 @@ class CMakeBuild(build_ext):
             for file in files:
                 if file.endswith('.so') and (root != str(aidge_package.absolute())):
                     currentFile=os.path.join(root, file)
-                    shutil.copy(currentFile, str(aidge_package.absolute())) 
+                    shutil.copy(currentFile, str(aidge_package.absolute()))
 
         # Copy version.txt in aidge_package
         os.chdir(os.path.dirname(__file__))
-        shutil.copy("version.txt", str(aidge_package.absolute()))    
+        shutil.copy("version.txt", str(aidge_package.absolute()))
 
 
 if __name__ == '__main__':
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index bbf895285e0e00d1132eb1f46c7e67a455d705d7..03b2a9adb439eb00d0ba59a13fead4f25d617b36 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -519,17 +519,17 @@ void Aidge::GraphView::link(std::string /*name1_inID*/,
   printf("Not implemented yet.\n");
 }
 
-void Aidge::GraphView::insertParent(NodePtr childNode, 
-                  NodePtr newParentNode, 
-                  IOIndex_t childInputTensorIdx, 
-                  IOIndex_t newParentInputTensorIdx, 
+void Aidge::GraphView::insertParent(NodePtr childNode,
+                  NodePtr newParentNode,
+                  IOIndex_t childInputTensorIdx,
+                  IOIndex_t newParentInputTensorIdx,
                   IOIndex_t newParentOutputTensorIdx){
   NodePtr currentParentNode = childNode->getParent(childInputTensorIdx);
   const IOIndex_t currentParentOutputTensorIdx = childNode->input(childInputTensorIdx).second;
-  // Remove child from current parent & current Parent from child 
+  // Remove child from current parent & current Parent from child
   currentParentNode->removeChild(childNode, currentParentOutputTensorIdx);
 
-  // Add child 
+  // Add child
   currentParentNode->addChild(newParentNode,currentParentOutputTensorIdx, newParentInputTensorIdx);
   newParentNode->addChild(childNode, newParentOutputTensorIdx, childInputTensorIdx);
 
@@ -542,9 +542,8 @@ bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
   assert(mNodes.size()>0 && "There must be at least one Node to replace");
 
   bool replacable;
-  std::shared_ptr<Node> previousInputNode;
-  std::shared_ptr<Node> newInputNode;
-  std::shared_ptr<Node> previousOutputNode;
+  std::shared_ptr<Node> previousInputNode = (*inputNodes().begin());
+  std::shared_ptr<Node> previousOutputNode = (*outputNodes().begin());
   std::shared_ptr<Node> newOutputNode;
 
   auto gNew = std::make_shared<GraphView>();
@@ -552,18 +551,15 @@ bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
 
   if (newNodes.empty()) {
     replacable = (outputNodes().size() == 1) &&
-                      (inputNodes().size() == 1) &&
-                      ((*outputNodes().begin())->nbOutputs() == 1) &&
-                      ((*inputNodes().begin())->nbInputs() == 1);
-    previousOutputNode = (*outputNodes().begin());
-    previousInputNode = (*inputNodes().begin());
+                 (inputNodes().size() == 1) &&
+                 ((*outputNodes().begin())->nbOutputs() == 1) &&
+                 ((*inputNodes().begin())->nbDataInputs() == 1);
     newOutputNode = previousInputNode->input(0).first;
   } else {
-    replacable = ((outputNodes().size() == gNew->outputNodes().size()) &&
-                     (outputNodes().size() == 1));
-    previousOutputNode = (*outputNodes().begin());
     newOutputNode = (*gNew->outputNodes().begin());
-    replacable = replacable && (previousOutputNode->nbOutputs() == newOutputNode->nbOutputs());
+    replacable = (outputNodes().size() == gNew->outputNodes().size()) &&
+                 (outputNodes().size() == 1) &&
+                 (previousOutputNode->nbOutputs() == newOutputNode->nbOutputs());
   }
 
   if (replacable) {
diff --git a/src/graphmatching/NodeRegex.cpp b/src/graphmatching/NodeRegex.cpp
index bbb116d1b12a31b491b26d2a64d04b416b61c6b7..9bf164f60255c17492e528b0f27dec8c53f74979 100644
--- a/src/graphmatching/NodeRegex.cpp
+++ b/src/graphmatching/NodeRegex.cpp
@@ -12,7 +12,7 @@
 #include "aidge/graphmatching/NodeRegex.hpp"
 
 
-// Verification done by the Parameter system
+// Verification done by the Attribute system
 
 
 // Version 1 - Only test the type of the node (no need for a lexer)
@@ -39,8 +39,8 @@ bool Aidge::NodeRegex::isA(std::string NodeType){
 /**bool NodeRegex::_is(string &Node_op){
     // Parsing the condition is done in the initialization of the NodeRegex
     
-    // assert parameters exist in the node with the parameter function isParam()
+    // assert attributes exist in the node with the attribute function hasAttr()
 
-    // get the parameters
+    // get the attributes
 
 }*/
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3a50ec3e7f83517267ef4ad04cb2c855f8f9df7e
--- /dev/null
+++ b/src/recipies/FuseBatchNorm.cpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <set>
+#include <cassert>
+#include <memory>
+#include <string>
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/Conv.hpp"
+
+#include "aidge/utils/Recipies.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+// Graph Regex
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
+using namespace Aidge;
+
+void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
+
+    assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
+
+    // Assert the nodes types are correct to be fused
+    std::shared_ptr<Node> conv;
+    std::shared_ptr<Node> batchnorm;
+    for (const auto& element : nodes) {
+        assert((element->type() == "Conv" || element->type() == "BatchNorm") && "Wrong type for the nodes to replace");
+        if (element->type() == "Conv"){
+            conv = element;
+        }
+        else if (element->type() == "BatchNorm") {
+            batchnorm = element;
+        }
+    }
+    // TODO : check if batchnorm is the only child of the Conv or FC
+    std::shared_ptr<Tensor> scale  = batchnorm->input(1).first->getOperator()->getOutput(batchnorm->input(1).second);
+    std::shared_ptr<Tensor> shift  = batchnorm->input(2).first->getOperator()->getOutput(batchnorm->input(2).second);
+    std::shared_ptr<Tensor> b_mean = batchnorm->input(3).first->getOperator()->getOutput(batchnorm->input(3).second);
+    std::shared_ptr<Tensor> b_var  = batchnorm->input(4).first->getOperator()->getOutput(batchnorm->input(4).second);
+
+
+    // TODO : Find a way to remove the template
+    const float epsilon = std::static_pointer_cast<BatchNorm_Op<2>>(batchnorm->getOperator())->getAttr<float>("Epsilon");
+    DimSize_t convOutDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("OutChannels");
+
+
+    assert(scale->size()  == convOutDims);
+    assert(shift->size()  == convOutDims);
+    assert(b_mean->size() == convOutDims);
+    assert(b_var->size()  == convOutDims);
+    assert(epsilon > 0.0);
+    // TODO : no no_bias attribute ?
+    float meanVariance = 0.0;
+    unsigned int count = 0;
+
+    for (std::size_t output = 0; output < convOutDims; ++output) {
+        // TODO : get() assumes the datatype is float
+        if (b_var->get<float>(output) > 1.0e-12) {
+            meanVariance += b_var->get<float>(output);
+            ++count;
+        }
+        else {
+            printf("Zero-variance: %s [%lu]\n", conv->name().c_str(), output);
+        }
+    }
+    if (count > 0)
+        meanVariance /= count;
+    else {
+        printf("variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+    }
+
+    const DimSize_t channelsSize = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
+
+    // TODO : assumes the operator is a Conv2D
+    const std::array<DimSize_t, 2> kernelDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+
+    std::shared_ptr<Tensor> weight  = conv->input(1).first->getOperator()->getOutput(conv->input(1).second);
+    std::shared_ptr<Tensor> bias  = conv->input(2).first->getOperator()->getOutput(conv->input(2).second);
+
+    for (std::size_t output = 0; output < convOutDims; ++output) {
+        // Corrected for zero-variance issue:
+        // "A Quantization-Friendly Separable Convolution for MobileNets"
+        // https://arxiv.org/pdf/1803.08607.pdf
+        // to help post-training quantization
+        const float factor = scale->get<float>(output)
+            / std::sqrt(epsilon + ((b_var->get<float>(output) > 1.0e-12 || count == 0)
+                        ? b_var->get<float>(output) : meanVariance));
+        // Weights adjustments
+        for (std::size_t channel = 0; channel < channelsSize; ++channel) {
+            // TODO : assumes 2 kernel dimensions
+            for(std::size_t k0 = 0; k0 < kernelDims[0]; ++ k0){
+                for(std::size_t k1 = 0; k1 < kernelDims[1]; ++ k1){
+                    std::vector<DimSize_t> currentIdx = {output, channel, k0, k1};
+                    // TODO : assumes the weights are float
+                    float weightValue = weight->get<float>(currentIdx);
+                    weight->set<float>(currentIdx, weightValue*factor); // TODO : check that this update propagates to the Conv weights
+                }
+            }
+        }
+
+        // TODO : check if noBias==true is set, then set biasValue to 0
+        float biasValue = bias->get<float>(output);
+
+        biasValue = shift->get<float>(output) + (biasValue - b_mean->get<float>(output)) * factor;
+
+        bias->set<float>(output, biasValue);
+
+    }
+    auto g = std::make_shared<GraphView>();
+    g->add(std::set<std::shared_ptr<Node>>({
+        batchnorm,
+        batchnorm->input(1).first,
+        batchnorm->input(2).first,
+        batchnorm->input(3).first,
+        batchnorm->input(4).first
+    }));
+    g->replaceWith({});
+
+}
+
+void Aidge::fuseBatchNorm(std::shared_ptr<GraphView> graphView){
+    std::map<std::string,NodeRegex*> nodesRegex;
+    nodesRegex["BatchNorm"] = new NodeRegex("BatchNorm");
+    nodesRegex["Conv"] = new NodeRegex("Conv");
+    nodesRegex["FC"] = new NodeRegex("FC");
+
+    std::vector<std::string> seqRegex;
+    seqRegex.push_back("Conv -> BatchNorm;"); // TODO: Add (Conv | FC)
+    GRegex GReg(nodesRegex, seqRegex);
+    Match matches = GReg.match(graphView);
+    std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
+    for (size_t i = 0; i < matches.getNbMatch(); ++i) {
+        fuseBatchNorm(matchNodes[i]);
+    }
+}
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index 561d25776a28f1aad8f8c943711887ec6661a10c..1de79890f9b597c4baff7427e01d7217f9695a44 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -20,21 +20,18 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-
+// Graph Regex
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
 using namespace Aidge;
 
-/**
- * @brief Merge MatMul and Add Node into FC.
- * 
- * @param nodes Strict set of Node to merge.
- */
 void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
     // Fuse Mulmat & Add into FC
     // Inputs : old nodes (pointers on mul & add)
-    
+
     assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
     // Too bad the type information is lost after matching; how can match metadata (not only the type) be kept?
-    
+
     // Step 0 : Assert the nodes types are correct to be fused
     std::shared_ptr<Node> add;
     std::shared_ptr<Node> matmul;
@@ -53,7 +50,7 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
     auto producer_add_bias = add->input(1);
     Tensor& bias_tensor = (producer_add_bias.first)->getOperator()->output(0);
 
-    // Instanciate FC  
+    // Instantiate FC
     //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
     std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias_tensor.dims()[0], false));
 
@@ -61,10 +58,12 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
     // link weights & bias
     if (matmul->getParent(1)==nullptr) {
         matmul->getParent(0)->addChild(fc, 0, 1);
+        printf("MatMul out[1] == nullptr !\n");
     } else {
+        printf("MatMul out[1] != nullptr !\n");
         if (matmul->getParent(0)!=nullptr)
             matmul->getParent(0)->addChild(fc, 0, 0);
-        matmul->getParent(1)->addChild(fc, 0, 1);
+        matmul->input(1).first->addChild(fc, 0, 1);
     }
     (producer_add_bias.first)->addChild(fc,0,2);
 
@@ -74,7 +73,22 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
         // Maybe create a central mechanism to update automatically all graph views rather than each node have graphview presence memory ?
     auto nodeToReplace = std::make_shared<GraphView>();
-    nodeToReplace->add(nodes);
+    nodeToReplace->add(nodes, false);
     nodeToReplace->replaceWith({fc});
 
-}
\ No newline at end of file
+}
+
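+// Graph-level overload: match every MatMul -> Add sequence and replace each
+// match with a single FC node via the overload above.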
+void Aidge::fuseMulAdd(std::shared_ptr<GraphView> graphView){
+
+    std::map<std::string,NodeRegex*> nodesRegex;
+    nodesRegex["MatMul"] = new NodeRegex("MatMul");
+    nodesRegex["Add"] = new NodeRegex("Add");
+    std::vector<std::string> seqRegex;
+    seqRegex.push_back("MatMul -> Add;");
+    GRegex GReg(nodesRegex, seqRegex);
+    Match matches = GReg.match(graphView);
+    std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
+    for (size_t i = 0; i < matches.getNbMatch(); ++i) {
+        fuseMulAdd(matchNodes[i]);
+    }
+}
diff --git a/src/recipies/LabelGraph.cpp b/src/recipies/LabelGraph.cpp
index 7ac2cbf6ca65c7ecbced9596efb71c2052405984..369336f7981198f962d8ab949309005be9ac5eb9 100644
--- a/src/recipies/LabelGraph.cpp
+++ b/src/recipies/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->get<ConvParam::KernelDims>(), op->get<ConvParam::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvAttr::KernelDims>(), op->getAttr<ConvAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->get<ConvDepthWiseParam::KernelDims>(), op->get<ConvDepthWiseParam::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvDepthWiseAttr::KernelDims>(), op->getAttr<ConvDepthWiseAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->get<AvgPoolingParam::KernelDims>(), op->get<AvgPoolingParam::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<AvgPoolingAttr::KernelDims>(), op->getAttr<AvgPoolingAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
diff --git a/src/recipies/RemoveFlatten.cpp b/src/recipies/RemoveFlatten.cpp
index cc3c3324e40636a1edcbc73cdc4a9dcfeec8a026..9096c107ba505f5f18993a761273552408db721b 100644
--- a/src/recipies/RemoveFlatten.cpp
+++ b/src/recipies/RemoveFlatten.cpp
@@ -15,10 +15,38 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/utils/Recipies.hpp"
 
+// Graph Regex
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
+
 namespace Aidge {
     void removeFlatten(std::set<std::shared_ptr<Node>> nodes) {
+        assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
+        std::shared_ptr<Node> flatten;
+        for (const auto& element : nodes) {
+            assert((element->type() == "FC" || element->type() == "Flatten") && "Wrong type for the nodes to replace");
+            if (element->type() == "Flatten"){
+                flatten = element;
+            }
+        }
         auto g = std::make_shared<GraphView>();
-        g->add(std::set<std::shared_ptr<Node>>({nodes}));
+        // TODO: avoid using replaceWith() and use a dedicated remove method instead
+        g->add(std::set<std::shared_ptr<Node>>({flatten}));
         g->replaceWith({});
     }
-}
\ No newline at end of file
+
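+    // Graph-level overload: find every Flatten -> FC sequence and remove the
+    // Flatten node from each match.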
+    void removeFlatten(std::shared_ptr<GraphView> graphView){
+        std::map<std::string,NodeRegex*> nodesRegex;
+        nodesRegex["Flatten"] = new NodeRegex("Flatten");
+        nodesRegex["FC"] = new NodeRegex("FC");
+        std::vector<std::string> seqRegex;
+        seqRegex.push_back("Flatten->FC;");
+        GRegex GReg(nodesRegex, seqRegex);
+        Match matches = GReg.match(graphView);
+        std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
+        for (size_t i = 0; i < matches.getNbMatch(); ++i) {
+            removeFlatten(matchNodes[i]);
+        }
+    }
+}
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index dc0768d2b6f7a1dd46fc0a8523b950011f7dcf5d..4dc8eb5c84ddb25546a32a672bdc84685a6f79f0 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -34,6 +34,11 @@ void drawProgressBar(double progress, int barWidth, const std::string& additiona
 }
 
 void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
+    // TODO: loop over the list of nodes to run
+    // and run every runnable consumer sequentially, once each
+    // TODO: handle memory allocation in the scheduler
+    // TODO: optimize memory usage
+
     // setup initial producers list
     mComputationNumber = 0;
     std::set<std::shared_ptr<Node>> producers;
@@ -74,16 +79,16 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
                        "\n\t\tR/C:\t",
                        (consumer->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(consumer.get()))).c_str());
                 for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) {
-                    printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
+                    printf("%zu/%zu\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
-                printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
                 printf("\n\t\tP:\t");
                 for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
-                    printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                    printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
-                printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
                 printf("\n");
             }
             bool isRunnable = true;
@@ -123,13 +128,13 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
                     printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
-                printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
                 printf("\n\t\tP:\t");
                 for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
-                    printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                    printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
-                printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
                 printf("\n");
             }
             bool isStillConsumer = false;
@@ -180,35 +185,20 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
     mScheduling.clear();
 
     this->generateScheduling();
-
-    // TODO: For loop on the list of node to run
-    // run sequencially every runnable consumers once
-    // TODO: handle memory allocation in scheduler
-    // TODO: optimize memory usage
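+    // cpt counts the nodes executed so far; it only drives the progress bar.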
+    int cpt = 0;
     for (const auto& runnable : mStaticSchedule) {
-        bool computationOverForConsumer = true;
-        for (IOIndex_t parentIDi = 0; parentIDi < runnable->nbInputs(); ++parentIDi) {
-            if (runnable->getOperator()->getNbConsumedData(parentIDi) <
-                runnable->getOperator()->getNbRequiredData(parentIDi)) {
-                computationOverForConsumer = false;
-                break;
-            }
-        }
-        if (computationOverForConsumer) {
-            computationOver.insert(runnable);
-        }
-
         if (verbose)
             printf("run: %s\n",
                     (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
         else
-            drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(mComputationNumber), 50,
+            drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50,
                             (std::string("running ") + runnable->type() + "_" +
                                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
         const auto tStart = std::chrono::high_resolution_clock::now();
         runnable->forward();
         const auto tEnd = std::chrono::high_resolution_clock::now();
         mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+        cpt++;
     }
     if (!verbose) drawProgressBar(1.0, 50, "                                   ");
     printf("\n");
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 2208399897f586becca798eb469344af01dbab64..8d634cc3a105c423b54b6003f41204aeb1fc5335 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -17,72 +17,72 @@
 
 using namespace Aidge;
 
-TEST_CASE("[core/operators] GenericOp(add & get parameters)", "[Operator]") {
+TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        const char* key = "intParam";
-        Testop.addParameter(key, int(5));
-        int registeredVal = Testop.getParameter<int>(key);
+        const char* key = "intAttr";
+        Testop.addAttr(key, int(5));
+        int registeredVal = Testop.getAttr<int>(key);
         REQUIRE(registeredVal == 5);
     }
     SECTION("LONG") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         long value = 3;
-        const char* key = "longParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<long>(key) == value);
+        const char* key = "longAttr";
+        Testop.addAttr(key, value);
+        REQUIRE(Testop.getAttr<long>(key) == value);
     }
     SECTION("FLOAT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         float value = 2.0;
-        const char* key = "floatParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<float>(key) == value);
+        const char* key = "floatAttr";
+        Testop.addAttr(key, value);
+        REQUIRE(Testop.getAttr<float>(key) == value);
     }
      SECTION("VECTOR<BOOL>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<bool> value = {true, false, false, true, true};
         const char* key = "vect";
-        Testop.addParameter(key, value);
+        Testop.addAttr(key, value);
 
-        REQUIRE(Testop.getParameter<std::vector<bool>>(key).size() == value.size());
+        REQUIRE(Testop.getAttr<std::vector<bool>>(key).size() == value.size());
         for (std::size_t i=0; i < value.size(); ++i){
-            REQUIRE(Testop.getParameter<std::vector<bool>>(key)[i] == value[i]);
+            REQUIRE(Testop.getAttr<std::vector<bool>>(key)[i] == value[i]);
         }
     }
     SECTION("VECTOR<INT>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<int> value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
         const char* key = "vect";
-        Testop.addParameter(key, value);
+        Testop.addAttr(key, value);
 
-        REQUIRE(Testop.getParameter<std::vector<int>>(key).size() == value.size());
+        REQUIRE(Testop.getAttr<std::vector<int>>(key).size() == value.size());
         for (std::size_t i=0; i < value.size(); ++i){
-            REQUIRE(Testop.getParameter<std::vector<int>>(key)[i] == value[i]);
+            REQUIRE(Testop.getAttr<std::vector<int>>(key)[i] == value[i]);
         }
     }
     SECTION("MULTIPLE PARAMS") {
         /*
-        Goal : Test that the offsets are well done by adding different parameters with different size.
+        Goal : Test that the offsets are well done by adding different attributes with different size.
         */
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addParameter<long>("longParam", 3);
-        Testop.addParameter<float>("floatParam", 2.0);
-        Testop.addParameter<uint8_t>("uint8Param", 5);
-        Testop.addParameter<long long>("llParam", 10);
-        REQUIRE(Testop.getParameter<long>("longParam") == 3);
-        REQUIRE(Testop.getParameter<float>("floatParam") == 2.0);
-        REQUIRE(Testop.getParameter<uint8_t>("uint8Param") == 5);
-        REQUIRE(Testop.getParameter<long long>("llParam") == 10);
+        Testop.addAttr<long>("longAttr", 3);
+        Testop.addAttr<float>("floatAttr", 2.0);
+        Testop.addAttr<uint8_t>("uint8Attr", 5);
+        Testop.addAttr<long long>("llAttr", 10);
+        REQUIRE(Testop.getAttr<long>("longAttr") == 3);
+        REQUIRE(Testop.getAttr<float>("floatAttr") == 2.0);
+        REQUIRE(Testop.getAttr<uint8_t>("uint8Attr") == 5);
+        REQUIRE(Testop.getAttr<long long>("llAttr") == 10);
     }
 }
 
-TEST_CASE("[core/operator] GenericOp(type check)", "[.ass]") {
+TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
     SECTION("WRONG TYPE FOR GETTER") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addParameter<long>("longParam", 3);
+        Testop.addAttr<long>("longAttr", 3);
 
         // This line should trigger a failed assertion
-        REQUIRE_THROWS(Testop.getParameter<int>("longParameter"));
+        REQUIRE_THROWS(Testop.getAttr<int>("longAttribute"));
     }
 }
diff --git a/unit_tests/recipies/Test_FuseMulAdd.cpp b/unit_tests/recipies/Test_FuseMulAdd.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..da53642055a3146c71a211ad7816f21c9b92d6cd
--- /dev/null
+++ b/unit_tests/recipies/Test_FuseMulAdd.cpp
@@ -0,0 +1,77 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+// #include "aidge/backend/cpu/operator/AddImpl.hpp"
+// #include "aidge/backend/cpu/operator/ConvImpl.hpp"
+// #include "aidge/backend/cpu/operator/FCImpl.hpp"
+// #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Recipies.hpp"
+
+namespace Aidge {
+
+TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
+    // generate the original GraphView
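+    // Topology under test (weight/bias Producers omitted):
+    //   input -> matmul0 -> add0 -> matmul1 -> add1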
+    auto matmul0 = MatMul(5, "matmul0");
+    auto add0 = Add<2>("add0");
+    auto matmul1 = MatMul(5, "matmul1");
+    auto add1 = Add<2>("add1");
+
+    auto b0 = Producer({5}, "B0");
+    auto w0 = Producer({5, 5}, "W0");
+    auto b1 = Producer({5}, "B1");
+    auto w1 = Producer({5,5},"W1");
+    auto input = Producer({2,5}, "input");
+
+    input->addChild(matmul0, 0, 0);
+    w0->addChild(matmul0, 0, 1);
+
+    matmul0->addChild(add0, 0, 0);
+    b0->addChild(add0, 0, 1);
+
+    add0->addChild(matmul1, 0, 0);
+    w1->addChild(matmul1, 0, 1);
+
+    matmul1->addChild(add1, 0, 0);
+    b1->addChild(add1, 0, 1);
+
+    auto g = std::make_shared<GraphView>();
+    g->add({matmul0, add0, matmul1, add1, b0, b1});
+
+    // Check original graph
+    REQUIRE(g->getNodes() ==
+            std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+    REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
+    REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
+    REQUIRE(((matmul1->getParent(0) == add0) && (matmul1->getParent(1) == w1)));
+    REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
+
+    // Transform the GraphView in place
+    fuseMulAdd(g);
+    g->save("bonjour"); // dump the transformed graph for inspection
+
+    // Check the new GraphView
+    std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+    REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+    REQUIRE(newNodes.size() == 6);
+    for (const auto& node : newNodes) {
+        REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
+    }
+}
+}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/utils/Test_StaticAttributes.cpp b/unit_tests/utils/Test_StaticAttributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..36c2e0454b415e1cb25cc3581016530a372b9e65
--- /dev/null
+++ b/unit_tests/utils/Test_StaticAttributes.cpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <string>
+#include <vector>
+
+#include "aidge/utils/StaticAttributes.hpp"
+
+using namespace Aidge;
+
+enum class TestAttr { a, b, c, d };
+
+namespace {
+template <>
+const char *const EnumStrings<TestAttr>::data[] = {
+    "a",
+    "b",
+    "c",
+    "d"
+};
+}
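+// EnumStrings<TestAttr> supplies the attribute names used by the string-based
+// getAttr("a") lookups below.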
+
+using Attributes_ = StaticAttributes<TestAttr, int, float, std::string, std::vector<bool>>;
+template <TestAttr e>
+using attr = typename Attributes_::template attr<e>;
+
+TEST_CASE("[core/attributes] StaticAttribute") {
+    SECTION("TestAttr") {
+        StaticAttributes<TestAttr, int, float, std::string, std::vector<bool>> attrs(
+            attr<TestAttr::a>(42),
+            attr<TestAttr::b>(18.75),
+            attr<TestAttr::c>("test"),
+            attr<TestAttr::d>({true, false, true}));
+
+        REQUIRE(attrs.getAttr<int>("a") == 42);
+        REQUIRE_THROWS(attrs.getAttr<int>("inexistant"));
+    }
+}