diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index 73b85c8a409e675c849b9ca66557c63b5acf6359..cd56a55fa7e9cbcefba4715188fd270462e81976 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -27,6 +27,8 @@ build:ubuntu_python:
     - python3 -m pip install virtualenv
     - virtualenv venv
     - source venv/bin/activate
+    # Numpy dependency for unit test
+    - python3 -m pip install numpy
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f79bf02f2170652314282e2943d44b2e2abc245c..ef8892ecc8b60684ca8f44ce011d8f07f4f3616d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -75,6 +75,19 @@ set(SANITIZE_FLAGS)
 set(SANITIZE_MSVC_FLAGS)
 endif()
 
+if (DOSANITIZE STREQUAL "ON")
+set(SANITIZE_FLAGS -fsanitize=address,leak,undefined,float-divide-by-zero -fno-omit-frame-pointer)
+#TODO sanitizer seems buggy in some situations with msvc, leading to linker errors, temporarily inactivating it
+#set(SANITIZE_MSVC_FLAGS /fsanitize=address)
+else()
+set(SANITIZE_FLAGS)
+set(SANITIZE_MSVC_FLAGS)
+endif()
+
+# -fvisibility=hidden required by pybind11
+target_compile_options(${module_name} PUBLIC
+    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
+    -fvisibility=hidden>)
 target_compile_options(${module_name} PRIVATE
 $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
 -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -fstrict-aliasing -Wstrict-aliasing=1 $<$<BOOL:${WERROR}>:-Werror> ${SANITIZE_FLAGS}>)
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 8898bc5a7ac6ce771cab8402933d464c1f04316f..fc60f52274162155f8f891bf86c22c9a13b241f4 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -30,36 +30,67 @@ class test_operator_binding(unittest.TestCase):
         self.assertNotEqual(gop.name(), "")
 
     def test_param_bool(self):
-        self.generic_operator.add_parameter("bool", True)
-        self.assertEqual(self.generic_operator.get_parameter("bool"), True)
+        self.generic_operator.add_attr("bool", True)
+        self.assertEqual(self.generic_operator.has_attr("bool"), True)
+        self.assertEqual(self.generic_operator.get_attr("bool"), True)
+        self.assertEqual(self.generic_operator.get_attr_type("bool"), "bool")
+        self.assertEqual(self.generic_operator.get_attrs_name(), {"bool"})
+        self.generic_operator.del_attr("bool")
+        self.assertEqual(self.generic_operator.has_attr("bool"), False)
+        self.assertEqual(len(self.generic_operator.get_attrs_name()), 0)
 
     def test_param_int(self):
-        self.generic_operator.add_parameter("int", 1)
-        self.assertEqual(self.generic_operator.get_parameter("int"), 1)
+        self.generic_operator.add_attr("int", 1)
+        self.assertEqual(self.generic_operator.get_attr("int"), 1)
 
     def test_param_float(self):
-        self.generic_operator.add_parameter("float", 2.0)
-        self.assertEqual(self.generic_operator.get_parameter("float"), 2.0)
+        self.generic_operator.add_attr("float", 2.0)
+        self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
 
     def test_param_str(self):
-        self.generic_operator.add_parameter("str", "value")
-        self.assertEqual(self.generic_operator.get_parameter("str"), "value")
+        self.generic_operator.add_attr("str", "value")
+        self.assertEqual(self.generic_operator.get_attr("str"), "value")
 
     def test_param_l_int(self):
-        self.generic_operator.add_parameter("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
-        self.assertEqual(self.generic_operator.get_parameter("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
 
     def test_param_l_bool(self):
-        self.generic_operator.add_parameter("l_bool", [True, False, False, True])
-        self.assertEqual(self.generic_operator.get_parameter("l_bool"), [True, False, False, True])
+        self.generic_operator.add_attr("l_bool", [True, False, False, True])
+        self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
 
     def test_param_l_float(self):
-        self.generic_operator.add_parameter("l_float", [2.0, 1.0])
-        self.assertEqual(self.generic_operator.get_parameter("l_float"), [2.0, 1.0])
+        self.generic_operator.add_attr("l_float", [2.0, 1.0])
+        self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
 
     def test_param_l_str(self):
-        self.generic_operator.add_parameter("l_str", ["ok"])
-        self.assertEqual(self.generic_operator.get_parameter("l_str"), ["ok"])
+        self.generic_operator.add_attr("l_str", ["ok"])
+        self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
+
+    def test_dynamicattribute_binding(self):
+        # Check original C++ attributes are bound
+        attrs = aidge_core.test_DynamicAttributes_binding()
+        self.assertEqual(attrs.has_attr("a"), True)
+        self.assertEqual(attrs.get_attr("a"), 42)
+        self.assertEqual(attrs.has_attr("b"), True)
+        self.assertEqual(attrs.get_attr("b"), "test")
+        self.assertEqual(attrs.has_attr("c"), True)
+        self.assertEqual(attrs.get_attr("c"), [True, False, True])
+        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c"})
+        self.assertEqual(attrs.has_attr("d"), False)
+
+        # Add Python attributes
+        attrs.add_attr("d", 18.56)
+        self.assertEqual(attrs.get_attr("d"), 18.56)
+        self.assertEqual(attrs.has_attr("d"), True)
+        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c", "d"})
+        self.assertEqual(attrs.has_attr("e"), False)
+
+        # Check that added Python attribute is accessible in C++
+        # Return the value of an attribute named "d" of type float64 (double in C++)
+        self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 18.56)
+        attrs.set_attr("d", 23.89)
+        self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
     def test_compute_output_dims(self):
         in_dims=[25, 25]
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index 02c7598820d2429bc49ff9a2f02c8ee841783173..566650713c36236c19763f466ee906970466c02e 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -11,7 +11,7 @@ SPDX-License-Identifier: EPL-2.0
 import unittest
 import aidge_core
 
-class test_parameters(unittest.TestCase):
+class test_attributes(unittest.TestCase):
     """Very basic test to make sure the python APi is not broken.
     Can be remove in later stage of the developpement.
     """
@@ -27,21 +27,21 @@ class test_parameters(unittest.TestCase):
         out_channels = 8
         k_dims = [2, 2]
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
-        self.assertEqual(conv_op.get("InChannels"), in_channels)
-        self.assertEqual(conv_op.get("OutChannels"), out_channels)
-        self.assertEqual(conv_op.get("KernelDims"), k_dims)
+        self.assertEqual(conv_op.get_attr("InChannels"), in_channels)
+        self.assertEqual(conv_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
 
     def test_fc(self):
         out_channels = 8
         nb_bias = True
         fc_op = aidge_core.FC(out_channels, nb_bias).get_operator()
-        self.assertEqual(fc_op.get("OutChannels"), out_channels)
-        self.assertEqual(fc_op.get("NoBias"), nb_bias)
+        self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_matmul(self):
         out_channels = 8
-        matmul_op = aidge_core.Matmul(out_channels).get_operator()
-        self.assertEqual(matmul_op.get("OutChannels"), out_channels)
+        matmul_op = aidge_core.MatMul(out_channels).get_operator()
+        self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
 
     def test_producer_1D(self):
         dims = [5]
@@ -71,7 +71,7 @@ class test_parameters(unittest.TestCase):
     def test_leaky_relu(self):
         negative_slope = 0.25
         leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
-        self.assertEqual(leakyrelu_op.get("NegativeSlope"), negative_slope)
+        self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipies.py
new file mode 100644
index 0000000000000000000000000000000000000000..754907443530f7e73d1e10ed9549d0c8eb78a011
--- /dev/null
+++ b/aidge_core/unit_tests/test_recipies.py
@@ -0,0 +1,78 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+class test_recipies(unittest.TestCase):
+    """
+    """
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_remove_flatten(self):
+        graph_view = aidge_core.sequential([
+            aidge_core.GenericOperator("Flatten", 1, 1, 1, name="Flatten0"),
+            aidge_core.FC(50, name='0')
+        ])
+        old_nodes = graph_view.get_nodes()
+        aidge_core.remove_flatten(graph_view)
+        self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 1)
+        self.assertTrue("Flatten0" not in [i.name for i in graph_view.get_nodes()])
+
+        self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
+
+    def test_fuse_matmul_add(self):
+        matmul0 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul0")
+        add0 = aidge_core.Add(name="Add0")
+        matmul1 = aidge_core.GenericOperator("MatMul", 1, 2, 1, name="MatMul1")
+        add1 = aidge_core.Add(name="Add1")
+
+        graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
+
+        w0 = aidge_core.Producer([1, 1], name="W0")
+        w0.add_child(matmul0, 0, 1)
+        graph_view.add(w0)
+
+        b0 = aidge_core.Producer([1], name="B0")
+        b0.add_child(add0, 0, 1)
+        graph_view.add(b0)
+
+        w1 = aidge_core.Producer([1, 1], name="W1")
+        w1.add_child(matmul1, 0, 1)
+        graph_view.add(w1)
+
+        b1 = aidge_core.Producer([1], name="B1")
+        b1.add_child(add1, 0, 1)
+        graph_view.add(b1)
+
+        old_nodes = graph_view.get_nodes()
+        aidge_core.fuse_mul_add(graph_view)
+
+        self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 2)
+        self.assertTrue("MatMul0" not in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("Add0" not in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("MatMul1" not in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("Add1" not in [i.name() for i in graph_view.get_nodes()])
+
+        self.assertTrue("W0" in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("B0" in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("W1" in [i.name() for i in graph_view.get_nodes()])
+        self.assertTrue("B1" in [i.name() for i in graph_view.get_nodes()])
+        # TODO: check that the FC node was correctly created
+
+if __name__ == '__main__':
+    unittest.main()
+
+
+
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
new file mode 100644
index 0000000000000000000000000000000000000000..a214a0e354c64b515d0a7ac24d81c85e116938ca
--- /dev/null
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -0,0 +1,44 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+from functools import reduce
+import numpy as np
+
+class test_tensor(unittest.TestCase):
+    """
+    """
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_getcoord_getidx(self):
+        dims = [2,2,2]
+        size = reduce((lambda x, y: x*y), dims)
+
+        np_array = np.arange(size).reshape(dims)
+
+        t = aidge_core.Tensor(np_array)
+        for i in range(size):
+            coord = t.get_coord(i)
+            idx = t.get_idx(coord)
+            self.assertEqual(idx, i)
+
+if __name__ == '__main__':
+    unittest.main()
+
+
+
+
+
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index cfda3ac7fa024f8cf80b4589d978b9b5bff5b4f0..47ded2a462477958320bfad3ad84e6b8f6ef6082 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -33,7 +33,7 @@
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Matmul.hpp"
+#include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 //#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -42,8 +42,9 @@
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
-#include "aidge/utils/CParameter.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Recipies.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index eade2331c93ed45586c1b6831590bd6973742b0a..08a1d19bbc64fa393218abd246cf853d9ac5f527 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -92,6 +92,13 @@ public:
         return mvDimensions;
     };
 
+    /// @brief Gets the data type size in bytes.
+    /// @return Data type size in bytes.
+    inline std::size_t GetScalarSize() const noexcept
+    {
+        return mScalarSize;
+    };
+
     /// @brief gets the logical coordinates of the data stored at mStorage.
     /// @returns Logical coordinates of the data stored at mStorage.
     inline std::vector<Coord_t> const &GetFirstDataCoordinates() const noexcept
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 11085c6e6a9c90f51ca0c113e6841da7fe9829b7..32f55d0a9e6c564ff70a224530d6bc21369dd39c 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_DATA_H_
 #define AIDGE_DATA_H_
 
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
 
 namespace Aidge
 {
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index dbbaba4f60d6f2788ce5796cfb8262ec6a6173b6..1b628cf2a0b942ee94e237014c2f96940c9c8735 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -329,7 +329,7 @@ public:
      * @return false: does not have an implementation
      * @deprecated ?
      */
-    bool HasImpl() const
+    bool hasImpl() const
     {
         return (mImpl) ? true : false;
     }
@@ -424,30 +424,39 @@ public:
         return mDims.empty();
     }
 
-    /// @brief Retrieve the data at given index, from the first storage byte.
-    /// @tparam expectedType Expected stored data type
-    /// @param idx index
-    /// @return reference to the data stored at given byte.
-    template<typename expectedType> expectedType &get(std::size_t const idx) noexcept
+    /// @brief Get element by its index
+    /// @tparam expectedType expected type of the stored element
+    /// @param idx offset in number of elements from the first stored element
+    /// @return read-write reference to the element
+    /// @bug this function is plain wrong and can be used only on cpu backend
+    /// @todo redesign
+    template<typename expectedType> expectedType &get(NbElts_t const idx) noexcept
     {
         ///@todo : add assert expected Type compatible with datatype
         ///@todo : add assert idx < Size
-        return *reinterpret_cast<expectedType *>(GetDataAddress() + idx);
+        return *reinterpret_cast<expectedType *>(
+            GetDataAddress() + idx * GetScalarSize());
     }
 
     template<typename expectedType>
     expectedType &get(std::vector<Coord_t> const &coordIdx)
     {
-        return get<expectedType>(getIdx(coordIdx));
+        return get<expectedType>(getIdx(coordIdx) / GetScalarSize());
     }
 
-    template<typename expectedType> void set(std::size_t idx, expectedType value)
+    /// @brief Set the value of an element identified by its index
+    /// @tparam expectedType expected type of the stored element
+    /// @param idx offset in number of elements from the first stored element
+    /// @param value value to store at the given index
+    /// @bug this function is plain wrong and can be used only on cpu backend
+    /// @todo redesign
+    template<typename expectedType> void set(NbElts_t const idx, expectedType const value)
     {
         ///@todo : add assert expected Type compatible with datatype
         ///@todo : add assert idx < Size
-        unsigned char *dataPtr = GetDataAddress() + idx;
+        unsigned char *dataPtr = GetDataAddress() + idx * GetScalarSize();
         ///@bug only valid for trivially copyable data
-        std::memcpy(dataPtr, &value, sizeof(expectedType));
+        std::memcpy(dataPtr, &value, GetScalarSize());
     }
 
     template<typename expectedType>
@@ -487,6 +496,11 @@ private:
     /// @return Address of the very first data in memory (lexicographic order)
     Byte_t *GetDataAddress() noexcept;
 
+    /// @brief Getting the size of the stored data type in bytes.
+    /// @return Size of the stored data type in bytes.
+    /// @todo Redesign as this does not depend on implementation and can be inlined
+    std::size_t GetScalarSize() const noexcept;
+
     ///\bug not protected against overflow
     std::size_t computeSize()
     {
diff --git a/include/aidge/hook/hook.hpp b/include/aidge/hook/hook.hpp
index 0448659b937c3498f57cae9935196ef2f38ecf6d..28f7ef5cddbc649af50209ba77527b8b75d731b7 100644
--- a/include/aidge/hook/hook.hpp
+++ b/include/aidge/hook/hook.hpp
@@ -17,7 +17,7 @@
 #ifndef Hook_H_
 #define Hook_H_
 
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include <memory>
 
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 6286b84c58b67cec738bdd3c3359a4596a30a83c..377bc84ea57ac6565d3c7440511a35a8c66f1a3d 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -53,7 +53,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 7e4fba1aacc202fd9c232913db101ed468c61a31..a79866c7791b214f11608918e13357dfd9c9c1c3 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -21,13 +21,13 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
 {
-enum class AvgPoolingParam
+enum class AvgPoolingAttr
 {
     StrideDims,
     KernelDims,
@@ -40,8 +40,8 @@ class AvgPooling_Op : public Operator,
                           AvgPooling_Op<DIM>,
                           std::string,
                           std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
-                      public Parameterizable<
-                          AvgPoolingParam,
+                      public StaticAttributes<
+                          AvgPoolingAttr,
                           std::array<DimSize_t, DIM>,
                           std::array<DimSize_t, DIM>,
                           std::array<DimSize_t, (DIM << 1)>>
@@ -56,13 +56,12 @@ public:
 
     AvgPooling_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<
-        AvgPoolingParam,
+    using Attributes_ = StaticAttributes<
+        AvgPoolingAttr,
         std::array<DimSize_t, DIM>,
         std::array<DimSize_t, DIM>,
         std::array<DimSize_t, (DIM << 1)>>;
-    template<AvgPoolingParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template<AvgPoolingAttr e> using attr = typename Attributes_::template attr<e>;
 
     constexpr AvgPooling_Op(
         const std::array<DimSize_t, DIM> &kernel_dims,
@@ -70,23 +69,21 @@ public:
         const std::array<DimSize_t, (DIM << 1)> &padding_dims
         = create_array<DimSize_t, (DIM << 1)>(0)) :
         Operator(Type),
-        Parameterizable_(
-            param<AvgPoolingParam::StrideDims>(stride_dims),
-            param<AvgPoolingParam::KernelDims>(kernel_dims),
-            param<AvgPoolingParam::PaddingDims>(padding_dims))
+        Attributes_(
+            attr<AvgPoolingAttr::StrideDims>(stride_dims),
+            attr<AvgPoolingAttr::KernelDims>(kernel_dims),
+            attr<AvgPoolingAttr::PaddingDims>(padding_dims))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     AvgPooling_Op(const AvgPooling_Op<DIM> &op) :
-        Operator(Type),
-        Parameterizable_(op),
-        mOutput(std::make_shared<Tensor>(*op.mOutput))
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
@@ -123,7 +120,7 @@ public:
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
             for (std::size_t dim = 0;
-                 dim < this->template get<AvgPoolingParam::KernelDims>().size();
+                 dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size();
                  ++dim)
             {
                 outputDims[dim + 2]
@@ -131,12 +128,12 @@ public:
                       + static_cast<DimSize_t>(std::floor(
                           static_cast<float>(
                               mInput->dims()[dim + 2]
-                              - this->template get<AvgPoolingParam::KernelDims>()[dim]
-                              + this->template get<AvgPoolingParam::PaddingDims>()[dim]
-                              + this->template get<
-                                  AvgPoolingParam::PaddingDims>()[dim + DIM])
-                          / static_cast<float>(
-                              this->template get<AvgPoolingParam::StrideDims>()[dim])));
+                              - this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]
+                              + this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim]
+                              + this->template getAttr<
+                                  AvgPoolingAttr::PaddingDims>()[dim + DIM])
+                          / static_cast<float>(this->template getAttr<
+                                               AvgPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -255,7 +252,7 @@ inline std::shared_ptr<Node> AvgPooling(
 namespace
 {
 template<>
-const char *const EnumStrings<Aidge::AvgPoolingParam>::data[]
+const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[]
     = {"StrideDims", "KernelDims", "PaddingDims"};
 }
 
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index b3cff89c60970f8250dea0bcee430e09c62e3025..21e86db56df04d6f129a473817bc56da542342f5 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -20,13 +20,13 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
 {
-enum class BatchNormParam
+enum class BatchNormAttr
 {
     Epsilon,
     Momentum
@@ -38,7 +38,7 @@ class BatchNorm_Op : public Operator,
                          BatchNorm_Op<DIM>,
                          std::string,
                          std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
-                     public Parameterizable<BatchNormParam, float, float>
+                     public StaticAttributes<BatchNormAttr, float, float>
 {
 public:
     // FIXME: change accessibility
@@ -55,28 +55,26 @@ public:
 
     BatchNorm_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<BatchNormParam, float, float>;
-    template<BatchNormParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
+    template<BatchNormAttr e> using attr = typename Attributes_::template attr<e>;
 
     constexpr BatchNorm_Op(float epsilon, float momentum) :
         Operator(Type),
-        Parameterizable_(
-            param<BatchNormParam::Epsilon>(epsilon),
-            param<BatchNormParam::Momentum>(momentum)),
+        Attributes_(
+            attr<BatchNormAttr::Epsilon>(epsilon),
+            attr<BatchNormAttr::Momentum>(momentum)),
         mOutput(std::make_shared<Tensor>())
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     BatchNorm_Op(const BatchNorm_Op<DIM> &op) :
-        Operator(Type),
-        Parameterizable_(op),
-        mOutput(std::make_shared<Tensor>(*op.mOutput))
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
@@ -122,10 +120,6 @@ public:
             {
                 if (mInputs[i]->size() != mInputs[0]->dims()[1])
                 {
-                    assert(
-                        !mInputs[0]->HasImpl()
-                        && "Incompatible size with already implemented learnable "
-                           "parameter");
                     mInputs[i]->resize(std::array<DimSize_t, 1>({mInputs[0]->dims()[1]}));
                 }
             }
@@ -233,7 +227,7 @@ inline std::shared_ptr<Node> BatchNorm(
 namespace
 {
 template<>
-const char *const EnumStrings<Aidge::BatchNormParam>::data[] = {"Epsilon", "Momentum"};
+const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = {"Epsilon", "Momentum"};
 }
 
-#endif // AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
+#endif // AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index b32b7ff3501e6fb67a10e5eca268a2426874e653..127d1ceb225f4a1683c8b69d547ef4472f1456d6 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -21,13 +21,13 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
 {
-enum class ConvParam
+enum class ConvAttr
 {
     StrideDims,
     DilationDims,
@@ -43,8 +43,8 @@ class Conv_Op : public Operator,
                     Conv_Op<DIM>,
                     std::string,
                     std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public Parameterizable<
-                    ConvParam,
+                public StaticAttributes<
+                    ConvAttr,
                     std::array<DimSize_t, DIM>,
                     std::array<DimSize_t, DIM>,
                     DimSize_t,
@@ -65,15 +65,15 @@ public:
 
     Conv_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<
-        ConvParam,
+    using Attributes_ = StaticAttributes<
+        ConvAttr,
         std::array<DimSize_t, DIM>,
         std::array<DimSize_t, DIM>,
         DimSize_t,
         DimSize_t,
         std::array<DimSize_t, DIM>,
         std::array<DimSize_t, (DIM << 1)>>;
-    template<ConvParam e> using param = typename Parameterizable_::template param<e>;
+    template<ConvAttr e> using attr = typename Attributes_::template attr<e>;
 
     constexpr Conv_Op(
         DimSize_t in_channels,
@@ -85,26 +85,24 @@ public:
         const std::array<DimSize_t, DIM> &dilation_dims
         = create_array<DimSize_t, DIM>(1)) :
         Operator(Type),
-        Parameterizable_(
-            param<ConvParam::StrideDims>(stride_dims),
-            param<ConvParam::DilationDims>(dilation_dims),
-            param<ConvParam::InChannels>(in_channels),
-            param<ConvParam::OutChannels>(out_channels),
-            param<ConvParam::KernelDims>(kernel_dims),
-            param<ConvParam::PaddingDims>(padding_dims))
+        Attributes_(
+            attr<ConvAttr::StrideDims>(stride_dims),
+            attr<ConvAttr::DilationDims>(dilation_dims),
+            attr<ConvAttr::InChannels>(in_channels),
+            attr<ConvAttr::OutChannels>(out_channels),
+            attr<ConvAttr::KernelDims>(kernel_dims),
+            attr<ConvAttr::PaddingDims>(padding_dims))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Conv_Op(const Conv_Op<DIM> &op) :
-        Operator(Type),
-        Parameterizable_(op),
-        mOutput(std::make_shared<Tensor>(*op.mOutput))
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
@@ -153,12 +151,12 @@ public:
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
             for (std::size_t dim = 0;
-                 dim < this->template get<ConvParam::KernelDims>().size();
+                 dim < this->template getAttr<ConvAttr::KernelDims>().size();
                  ++dim)
             {
                 const DimSize_t kernelExtent
-                    = this->template get<ConvParam::DilationDims>()[dim]
-                          * (this->template get<ConvParam::KernelDims>()[dim] - 1)
+                    = this->template getAttr<ConvAttr::DilationDims>()[dim]
+                          * (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1)
                       + 1;
 
                 outputDims[dim + 2]
@@ -166,13 +164,14 @@ public:
                       + static_cast<DimSize_t>(floor(
                           static_cast<float>(
                               mInputs[0]->dims()[dim + 2] - kernelExtent
-                              + this->template get<ConvParam::PaddingDims>()[dim]
-                              + this->template get<ConvParam::PaddingDims>()[dim + DIM])
+                              + this->template getAttr<ConvAttr::PaddingDims>()[dim]
+                              + this->template getAttr<
+                                  ConvAttr::PaddingDims>()[dim + DIM])
                           / static_cast<float>(
-                              this->template get<ConvParam::StrideDims>()[dim])));
+                              this->template getAttr<ConvAttr::StrideDims>()[dim])));
             }
 
-            outputDims[1] = this->template get<ConvParam::OutChannels>();
+            outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
             outputDims[0] = mInputs[0]->dims()[0];
             mOutput->resize(outputDims);
         }
@@ -308,7 +307,7 @@ inline std::shared_ptr<Node> Conv(
 namespace
 {
 template<>
-const char *const EnumStrings<Aidge::ConvParam>::data[]
+const char *const EnumStrings<Aidge::ConvAttr>::data[]
     = {"StrideDims",
        "DilationDims",
        "InChannels",
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index be736e6d4642bbff86c11f77b2d318707e292fa0..32d62ddf926bb29b91371910ea3ee5b8e938cf37 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -21,13 +21,13 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
 {
-enum class ConvDepthWiseParam
+enum class ConvDepthWiseAttr
 {
     StrideDims,
     DilationDims,
@@ -43,8 +43,8 @@ class ConvDepthWise_Op
           ConvDepthWise_Op<DIM>,
           std::string,
           std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
-      public Parameterizable<
-          ConvDepthWiseParam,
+      public StaticAttributes<
+          ConvDepthWiseAttr,
           std::array<DimSize_t, DIM>,
           std::array<DimSize_t, DIM>,
           DimSize_t,
@@ -64,15 +64,14 @@ public:
 
     ConvDepthWise_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<
-        ConvDepthWiseParam,
+    using Attributes_ = StaticAttributes<
+        ConvDepthWiseAttr,
         std::array<DimSize_t, DIM>,
         std::array<DimSize_t, DIM>,
         DimSize_t,
         std::array<DimSize_t, DIM>,
         std::array<DimSize_t, (DIM << 1)>>;
-    template<ConvDepthWiseParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template<ConvDepthWiseAttr e> using attr = typename Attributes_::template attr<e>;
 
     constexpr ConvDepthWise_Op(
         const std::array<DimSize_t, DIM> &kernel_dims,
@@ -82,25 +81,23 @@ public:
         const std::array<DimSize_t, DIM> &dilation_dims
         = create_array<DimSize_t, DIM>(1)) :
         Operator(Type),
-        Parameterizable_(
-            param<ConvDepthWiseParam::StrideDims>(stride_dims),
-            param<ConvDepthWiseParam::DilationDims>(dilation_dims),
-            param<ConvDepthWiseParam::Channels>(0),
-            param<ConvDepthWiseParam::KernelDims>(kernel_dims),
-            param<ConvDepthWiseParam::PaddingDims>(padding_dims))
+        Attributes_(
+            attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
+            attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+            attr<ConvDepthWiseAttr::Channels>(0),
+            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
+            attr<ConvDepthWiseAttr::PaddingDims>(padding_dims))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ConvDepthWise_Op(const ConvDepthWise_Op<DIM> &op) :
-        Operator(Type),
-        Parameterizable_(op),
-        mOutput(std::make_shared<Tensor>(*op.mOutput))
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
@@ -136,12 +133,12 @@ public:
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
             for (std::size_t dim = 0;
-                 dim < this->template get<ConvDepthWiseParam::KernelDims>().size();
+                 dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size();
                  ++dim)
             {
                 const DimSize_t kernelExtent
-                    = this->template get<ConvDepthWiseParam::DilationDims>()[dim]
-                          * (this->template get<ConvDepthWiseParam::KernelDims>()[dim]
+                    = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim]
+                          * (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim]
                              - 1)
                       + 1;
 
@@ -150,16 +147,17 @@ public:
                       + static_cast<DimSize_t>(floor(
                           static_cast<float>(
                               mInputs[0]->dims()[dim + 2] - kernelExtent
-                              + this->template get<ConvDepthWiseParam::PaddingDims>()[dim]
-                              + this->template get<
-                                  ConvDepthWiseParam::PaddingDims>()[dim + DIM])
-                          / static_cast<float>(this->template get<
-                                               ConvDepthWiseParam::StrideDims>()[dim])));
+                              + this->template getAttr<
+                                  ConvDepthWiseAttr::PaddingDims>()[dim]
+                              + this->template getAttr<
+                                  ConvDepthWiseAttr::PaddingDims>()[dim + DIM])
+                          / static_cast<float>(this->template getAttr<
+                                               ConvDepthWiseAttr::StrideDims>()[dim])));
             }
-            this->template get<ConvDepthWiseParam::Channels>() = mInputs[0]->dims()[1];
+            this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
             // std::array<DimSize_t, DIM+2> weightDims =
             // append(mInputs[0]->dims()[1],append(1, this->template
-            // get<ConvDepthWiseParam::KernelDims>())); if (mInputs[1]->empty()) {
+            // getAttr<ConvDepthWiseAttr::KernelDims>())); if (mInputs[1]->empty()) {
             //     mInputs[1]->resize(weightDims);
             // }
             // if (mInputs[2]->empty()) {
@@ -287,7 +285,7 @@ inline std::shared_ptr<Node> ConvDepthWise(
 namespace
 {
 template<>
-const char *const EnumStrings<Aidge::ConvDepthWiseParam>::data[]
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[]
     = {"StrideDims", "DilationDims", "Channels", "KernelDims", "PaddingDims"};
 }
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 8e98fded3cebfe051cb799ac171370bead7e7ea9..34fdc242e9462227eb2f89ae822f489aa0c9df2c 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -22,13 +22,13 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
 {
-enum class FCParam
+enum class FCAttr
 {
     OutChannels,
     NoBias
@@ -37,7 +37,7 @@ enum class FCParam
 class FC_Op
     : public Operator,
       public Registrable<FC_Op, std::string, std::unique_ptr<OperatorImpl>(const FC_Op&)>,
-      public Parameterizable<FCParam, DimSize_t, bool>
+      public StaticAttributes<FCAttr, DimSize_t, bool>
 {
 public:
     // FIXME: change accessibility
@@ -52,26 +52,23 @@ public:
 
     FC_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<FCParam, DimSize_t, bool>;
-    template<FCParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<FCAttr, DimSize_t, bool>;
+    template<FCAttr e> using attr = typename Attributes_::template attr<e>;
 
     FC_Op(DimSize_t out_channels, bool noBias) :
         Operator(Type),
-        Parameterizable_(
-            param<FCParam::OutChannels>(out_channels), param<FCParam::NoBias>(noBias))
+        Attributes_(attr<FCAttr::OutChannels>(out_channels), attr<FCAttr::NoBias>(noBias))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op) :
-        Operator(Type),
-        Parameterizable_(op),
-        mOutput(std::make_shared<Tensor>(*op.mOutput))
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
@@ -99,9 +96,9 @@ public:
         {
             assert(
                 std::dynamic_pointer_cast<Tensor>(data)->size()
-                == ((this->template get<FCParam::NoBias>()) == false ?
+                == ((this->template getAttr<FCAttr::NoBias>()) == false ?
                         static_cast<std::size_t>(
-                            this->template get<FCParam::OutChannels>()) :
+                            this->template getAttr<FCAttr::OutChannels>()) :
                         0));
             assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
         }
@@ -122,11 +119,11 @@ public:
         {
             // <in_features**, out_channels>
             std::array<DimSize_t, 2> weightDims
-                = {this->template get<FCParam::OutChannels>(),
+                = {this->template getAttr<FCAttr::OutChannels>(),
                    static_cast<DimSize_t>(mInputs[0]->sizeM1())};
             // <out_channels, batch>
             std::array<DimSize_t, 2> outputDims
-                = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()};
+                = {mInputs[0]->dims()[0], this->template getAttr<FCAttr::OutChannels>()};
 
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
@@ -222,7 +219,7 @@ FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "")
 namespace
 {
 template<>
-const char* const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels", "NoBias"};
+const char* const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels", "NoBias"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index b7a49f255add1c227bf4ffb8f4337d9ee22a851f..4dd516752b00df72cee1e3ce5839a96d1f927035 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -20,7 +20,7 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/CParameter.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
@@ -31,13 +31,13 @@ class GenericOperator_Op
       public Registrable<
           GenericOperator_Op,
           std::string,
-          std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>
+          std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
+      public DynamicAttributes
 {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<DimSize_t>>(
         const std::vector<std::vector<DimSize_t>>&)>;
 
-    CParameter mParams;
     IOIndex_t mNbDataIn;
     IOIndex_t mNbIn;
     IOIndex_t mNbOut;
@@ -63,13 +63,12 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op) :
         Operator(op.type().c_str()),
-        mParams(op.mParams),
         mNbDataIn(op.mNbDataIn),
         mNbIn(op.mNbIn),
         mNbOut(op.mNbOut)
@@ -96,41 +95,6 @@ public:
         return std::make_shared<GenericOperator_Op>(*this);
     }
 
-    /**
-     * @brief Get the Parameter object identified by its name.
-     * @tparam T expected parameter type.
-     * @param key Parameter name.
-     * @details assert if T is not the actual parameter type, if the parameter
-     * does not exist or internal parameter position is invalid.
-     * @todo Returning a T const& ? But dangerous => may get an address within
-     * param buffer that will get invalid after the CParam death.
-     * @note at() throws if the parameter does not exist, using find to test
-     * for parameter existance
-     * @return template<class T> The parameter.
-     */
-    template<class T> const T& getParameter(std::string const& key) const
-    {
-        return mParams.Get<const T>(key);
-    }
-
-    template<class T> T& getParameter(std::string const& key)
-    {
-        return mParams.Get<T>(key);
-    }
-
-    ///\brief Add a parameter value, identified by its name
-    ///\tparam T expected parameter type
-    ///\param i_ParamName Parameter name
-    ///\param i_Value Parameter value
-    ///\todo Pass i_Value by ref if large or not trivial
-    ///\bug If parameter already exists, its value is changed but written in the
-    /// internal buffer in a new location (previous value is still in memory at
-    /// its previous location)
-    template<class T> void addParameter(std::string const& key, T&& value)
-    {
-        mParams.Add<T>(key, std::forward<T>(value));
-    }
-
     // Helper functions that can be used with setComputeOutputDims():
     static const ComputeDimsFunc Identity;
 
@@ -139,16 +103,6 @@ public:
         mComputeOutputDims = func;
     }
 
-    std::string getParameterType(std::string const& key)
-    {
-        return mParams.getParamType(key);
-    }
-
-    std::vector<std::string> getParametersName()
-    {
-        return mParams.getParametersName();
-    }
-
     // Override Virtual Opertor methods
     void
     associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 094911d8fbc5b137b7bd38ea031b4aa230c72cd1..c453d2481137a0d340cc4b44c79061511b750a5e 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -23,11 +23,12 @@
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
 {
-enum class LeakyReLUParam
+enum class LeakyReLUAttr
 {
     NegativeSlope
 };
@@ -37,7 +38,7 @@ class LeakyReLU_Op : public Operator,
                          LeakyReLU_Op,
                          std::string,
                          std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
-                     public Parameterizable<LeakyReLUParam, float>
+                     public StaticAttributes<LeakyReLUAttr, float>
 {
 public:
     // FIXME: change accessibility
@@ -49,25 +50,22 @@ public:
 
     LeakyReLU_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<LeakyReLUParam, float>;
-    template<LeakyReLUParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
+    template<LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
 
     LeakyReLU_Op(float negativeSlope) :
-        Operator(Type),
-        Parameterizable_(param<LeakyReLUParam::NegativeSlope>(negativeSlope))
+        Operator(Type), Attributes_(attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op) :
-        Operator(Type),
-        Parameterizable_(op),
-        mOutput(std::make_shared<Tensor>(*op.mOutput))
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
@@ -184,7 +182,7 @@ LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "")
 namespace
 {
 template<>
-const char* const EnumStrings<Aidge::LeakyReLUParam>::data[] = {"NegativeSlope"};
+const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[] = {"NegativeSlope"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/MatMul.hpp
similarity index 73%
rename from include/aidge/operator/Matmul.hpp
rename to include/aidge/operator/MatMul.hpp
index dfee9c12e9664e8ac91b495791149b129faa006f..bacf20c21032d30b5560217d615d7c08477c6c18 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -22,23 +22,23 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
 {
-enum class MatmulParam
+enum class MatMulAttr
 {
     OutChannels
 };
 
-class Matmul_Op : public Operator,
+class MatMul_Op : public Operator,
                   public Registrable<
-                      Matmul_Op,
+                      MatMul_Op,
                       std::string,
-                      std::unique_ptr<OperatorImpl>(const Matmul_Op&)>,
-                  public Parameterizable<MatmulParam, DimSize_t>
+                      std::unique_ptr<OperatorImpl>(const MatMul_Op&)>,
+                  public StaticAttributes<MatMulAttr, DimSize_t>
 {
 public:
     std::array<std::shared_ptr<Tensor>, 2> mInputs
@@ -46,43 +46,41 @@ public:
     const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
 
 public:
-    static constexpr const char* Type = "Matmul";
+    static constexpr const char* Type = "MatMul";
 
-    Matmul_Op() = delete;
+    MatMul_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<MatmulParam, DimSize_t>;
-    template<MatmulParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<MatMulAttr, DimSize_t>;
+    template<MatMulAttr e> using attr = typename Attributes_::template attr<e>;
 
-    Matmul_Op(DimSize_t out_channels) :
-        Operator(Type), Parameterizable_(param<MatmulParam::OutChannels>(out_channels))
+    MatMul_Op(DimSize_t out_channels) :
+        Operator(Type), Attributes_(attr<MatMulAttr::OutChannels>(out_channels))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Matmul_Op(const Matmul_Op& op) :
-        Operator(Type),
-        Parameterizable_(op),
-        mOutput(std::make_shared<Tensor>(*op.mOutput))
+    MatMul_Op(const MatMul_Op& op) :
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
         mImpl = op.mImpl ?
-                    Registrar<Matmul_Op>::create(mOutput->getImpl().backend())(*this) :
+                    Registrar<MatMul_Op>::create(mOutput->getImpl()->backend())(*this) :
                     nullptr;
     }
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Matmul_Op
+     * @see Operator::MatMul_Op
      */
     std::shared_ptr<Operator> clone() const override
     {
-        return std::make_shared<Matmul_Op>(*this);
+        return std::make_shared<MatMul_Op>(*this);
     }
 
     void
@@ -101,11 +99,11 @@ public:
         {
             // <in_features**, out_channels>
             std::array<DimSize_t, 2> weightDims
-                = {static_cast<DimSize_t>(mInputs[0]->size()),
-                   this->template get<MatmulParam::OutChannels>()};
+                = {this->template getAttr<MatMulAttr::OutChannels>(),
+                   static_cast<DimSize_t>(mInputs[0]->sizeM1())};
             // <out_channels, batch>
-            std::array<DimSize_t, 1> outputDims
-                = {this->template get<MatmulParam::OutChannels>()};
+            std::array<DimSize_t, 2> outputDims = {
+                mInputs[0]->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()};
 
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
@@ -154,7 +152,7 @@ public:
 
     void setBackend(const std::string& name)
     {
-        mImpl = Registrar<Matmul_Op>::create(name)(*this);
+        mImpl = Registrar<MatMul_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
         // FIXME: temporary workaround
@@ -185,18 +183,18 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "")
+inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "")
 {
-    // FIXME: properly handle default w&b initialization in every cases
-    auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
-    addProducer(matmul, 1, {1, out_channels}, "w");
+    // FIXME: properly handle default w initialization in all cases
+    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
+    addProducer(matmul, 1, {out_channels, 1}, "w");
     return matmul;
 }
 } // namespace Aidge
 
 namespace
 {
-template<> const char* const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
+template<> const char* const EnumStrings<Aidge::MatMulAttr>::data[] = {"OutChannels"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index f3c420a1ed1457799d48bea09323d8c2e0ac0f35..9ad3ff8a8b99c301513558286a4911ece1c8d519 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -21,13 +21,13 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
 {
-enum class MaxPoolingParam
+enum class MaxPoolingAttr
 {
     StrideDims,
     KernelDims,
@@ -40,8 +40,8 @@ class MaxPooling_Op : public Operator,
                           MaxPooling_Op<DIM>,
                           std::string,
                           std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
-                      public Parameterizable<
-                          MaxPoolingParam,
+                      public StaticAttributes<
+                          MaxPoolingAttr,
                           std::array<DimSize_t, DIM>,
                           std::array<DimSize_t, DIM>,
                           std::array<DimSize_t, (DIM << 1)>>
@@ -56,13 +56,12 @@ public:
 
     MaxPooling_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<
-        MaxPoolingParam,
+    using Attributes_ = StaticAttributes<
+        MaxPoolingAttr,
         std::array<DimSize_t, DIM>,
         std::array<DimSize_t, DIM>,
         std::array<DimSize_t, (DIM << 1)>>;
-    template<MaxPoolingParam e>
-    using param = typename Parameterizable_::template param<e>;
+    template<MaxPoolingAttr e> using attr = typename Attributes_::template attr<e>;
 
     constexpr MaxPooling_Op(
         const std::array<DimSize_t, DIM> &kernel_dims,
@@ -70,24 +69,22 @@ public:
         const std::array<DimSize_t, (DIM << 1)> &padding_dims
         = create_array<DimSize_t, (DIM << 1)>(0)) :
         Operator(Type),
-        Parameterizable_(
-            param<MaxPoolingParam::StrideDims>(stride_dims),
-            param<MaxPoolingParam::KernelDims>(kernel_dims),
-            param<MaxPoolingParam::PaddingDims>(padding_dims)),
+        Attributes_(
+            attr<MaxPoolingAttr::StrideDims>(stride_dims),
+            attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+            attr<MaxPoolingAttr::PaddingDims>(padding_dims)),
         mOutput(std::make_shared<Tensor>())
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MaxPooling_Op(const MaxPooling_Op<DIM> &op) :
-        Operator(Type),
-        Parameterizable_(op),
-        mOutput(std::make_shared<Tensor>(*op.mOutput))
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
@@ -124,7 +121,7 @@ public:
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
             for (std::size_t dim = 0;
-                 dim < this->template get<MaxPoolingParam::KernelDims>().size();
+                 dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size();
                  ++dim)
             {
                 outputDims[dim + 2]
@@ -132,12 +129,12 @@ public:
                       + static_cast<DimSize_t>(std::floor(
                           static_cast<float>(
                               mInput->dims()[dim + 2]
-                              - this->template get<MaxPoolingParam::KernelDims>()[dim]
-                              + this->template get<MaxPoolingParam::PaddingDims>()[dim]
-                              + this->template get<
-                                  MaxPoolingParam::PaddingDims>()[dim + DIM])
-                          / static_cast<float>(
-                              this->template get<MaxPoolingParam::StrideDims>()[dim])));
+                              - this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]
+                              + this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim]
+                              + this->template getAttr<
+                                  MaxPoolingAttr::PaddingDims>()[dim + DIM])
+                          / static_cast<float>(this->template getAttr<
+                                               MaxPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -256,7 +253,7 @@ inline std::shared_ptr<Node> MaxPooling(
 namespace
 {
 template<>
-const char *const EnumStrings<Aidge::MaxPoolingParam>::data[]
+const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[]
     = {"StrideDims", "KernelDims", "PaddingDims"};
 }
 
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 9e12b159888923cfea10dd02b7b267a46abcb3b7..0c77a752493d251303c036c4061823c4f8bc499d 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -23,7 +23,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MetaOperator(const MetaOperator& op)
@@ -34,7 +34,7 @@ public:
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Matmul_Op
+     * @see Operator::MatMul_Op
      */
     std::shared_ptr<Operator> clone() const override {
         return std::make_shared<MetaOperator>(*this);
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index f0dd8a9c651eda11e52afd7fc473726a85a0c657..50d973362a68320a8983f35ac1ea62bdf17c29bf 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -19,8 +19,8 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
@@ -51,7 +51,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
@@ -80,6 +80,17 @@ public:
         assert(false && "Producer operator takes no input");
     }
 
+    /**
+     * @brief Set the Output Tensor of the Producer operator.
+     * This method will create a copy of the Tensor.
+     *
+     * @param newOutput Tensor containing the values to copy
+     */
+    void setOutputTensor(const Tensor& newOutput)
+    {
+        *mOutput = newOutput;
+    }
+
     void computeOutputDims() override final
     {
     }
@@ -218,4 +229,4 @@ void addProducer(
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 61eca1f249c5ebf21d53f0bc1d228107068ecad4..f1966f31715a66581949bb335c631470d0684214 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -47,7 +47,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 23e6c5f1dd2f0473c62564f20e86a91df130e3c5..532c08690d6005ed3dfd92cc4651dfa195cdf803 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -22,11 +22,12 @@
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge
 {
-enum class ScalingParam
+enum class ScalingAttr
 {
     scalingFactor
 };
@@ -36,7 +37,7 @@ class Scaling_Op : public Operator,
                        Scaling_Op,
                        std::string,
                        std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
-                   public Parameterizable<ScalingParam, float>
+                   public StaticAttributes<ScalingAttr, float>
 {
 public:
     // FIXME: change accessibility
@@ -48,25 +49,22 @@ public:
 
     Scaling_Op() = delete;
 
-    using Parameterizable_ = Parameterizable<ScalingParam, float>;
-    template<ScalingParam e> using param = typename Parameterizable_::template param<e>;
+    using Attributes_ = StaticAttributes<ScalingAttr, float>;
+    template<ScalingAttr e> using attr = typename Attributes_::template attr<e>;
 
     Scaling_Op(float scalingFactor) :
-        Operator(Type),
-        Parameterizable_(param<ScalingParam::scalingFactor>(scalingFactor))
+        Operator(Type), Attributes_(attr<ScalingAttr::scalingFactor>(scalingFactor))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Scaling_Op(const Scaling_Op& op) :
-        Operator(Type),
-        Parameterizable_(op),
-        mOutput(std::make_shared<Tensor>(*op.mOutput))
+        Operator(Type), Attributes_(op), mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
@@ -184,7 +182,7 @@ Scaling(float scalingFactor = 1.0f, const std::string& name = "")
 
 namespace
 {
-template<> const char* const EnumStrings<Aidge::ScalingParam>::data[] = {"scalingFactor"};
+template<> const char* const EnumStrings<Aidge::ScalingAttr>::data[] = {"scalingFactor"};
 }
 
 #endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 87968de49b782ca97ed4ef6003f9c19c6612c665..f6b901add8950c3686261c6cae6f826020e4c75e 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -47,7 +47,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but
      * not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
diff --git a/include/aidge/utils/Any.hpp b/include/aidge/utils/Any.hpp
index 8b6fb69f82dce838d72e561e496800873ad792a5..8eb4d8f4136f1bb835843d1b39cf64923d499256 100644
--- a/include/aidge/utils/Any.hpp
+++ b/include/aidge/utils/Any.hpp
@@ -1,197 +1,534 @@
-
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
+/**
+ * Origin: https://github.com/claudiofantacci/any
+ *
+ * Implementation of N4562 std::experimental::any (merged into C++17 as std::any)
+ * for C++11 compilers.
  *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
+ * See also:
+ *   + http://en.cppreference.com/w/cpp/any
+ *   + http://en.cppreference.com/w/cpp/experimental/any
+ *   + http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4562.html#any
+ *   + https://cplusplus.github.io/LWG/lwg-active.html#2509
  *
- * SPDX-License-Identifier: EPL-2.0
+ * Copyright (c) 2016 Denilson das Mercês Amorim
+ * Copyright (c) 2018 Claudio Fantacci
  *
- ********************************************************************************/
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE.md or copy at http://www.boost.org/LICENSE_1_0.txt)
+ */
 
-#ifndef AIDGE_ANY_H_
-#define AIDGE_ANY_H_
+#ifndef AIDGE_CORE_UTILS_ANY_H_
+#define AIDGE_CORE_UTILS_ANY_H_
 
-#include <assert.h>
-#include <new>
-#include <type_traits> // std::enable_if_t, std::decay_t, std::is_same, std::is_copy_constructible, std::remove_cv, std::remove_reference
-#include <typeinfo> // typeid
+#include <stdexcept>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
 
-class _any
+namespace libany
 {
-private:
-    /// @brief Operation to perform on the object.
-    enum _Op
+class bad_any_cast : public std::bad_cast
+{
+public:
+    const char* what() const noexcept override
     {
-        _Op_access,
-        _Op_get_type_info,
-        _Op_clone,
-        _Op_destroy
-    };
+        return "bad any_cast";
+    }
+};
 
-    union _Arg
+class any final
+{
+public:
+    /**
+     * Constructs an object of type any with an empty state.
+     */
+    any() : vtable(nullptr)
     {
-        const std::type_info* _M_typeinfo;
-        _any* _M_any;
-    };
-
-    /// @brief Stored data without type information.
-    void* _M_data;
-
-    /// @brief Member function to perform type-related computations on stored data.
-    void (*_M_manager)(_Op, const _any*, _Arg*);
+    }
 
-public:
-    /// @brief Class to centralize functions and type information in a memory efficient
-    /// way.
-    /// @tparam Tp Decayed stored type.
-    template<typename Tp> struct Manager
+    /**
+     * Constructs an object of type any with an equivalent state as other.
+     */
+    any(const any& rhs) : vtable(rhs.vtable)
     {
-        static void manage(_Op which, const _any* __any, _Arg* __arg)
+        if (rhs.has_value())
         {
-            auto ptr = static_cast<const Tp*>(__any->_M_data);
-            switch (which)
-            {
-                case _Op_get_type_info:
-                    __arg->_M_typeinfo = &typeid(Tp);
-                    break;
-                case _Op_clone:
-                    __arg->_M_any->_M_data = new Tp(*ptr);
-                    __arg->_M_any->_M_manager = __any->_M_manager;
-                    break;
-                case _Op_destroy:
-                    delete ptr;
-                    break;
-            }
+            rhs.vtable->copy(rhs.storage, this->storage);
         }
-        static Tp* access(const _any* __any)
+    }
+
+    /**
+     * Constructs an object of type any with a state equivalent to the original state of
+     * other. rhs is left in a valid but otherwise unspecified state.
+     */
+    any(any&& rhs) noexcept : vtable(rhs.vtable)
+    {
+        if (rhs.has_value())
         {
-            return static_cast<Tp*>(__any->_M_data);
+            rhs.vtable->move(rhs.storage, this->storage);
+            rhs.vtable = nullptr;
         }
+    }
 
-        // template <typename Up>
-        // static void create(void* data, Up&& value) {
-        //     data = new Tp(std::forward<Up>(value));
-        // }
-    };
+    /**
+     * Same effect as this->clear().
+     */
+    ~any()
+    {
+        this->reset();
+    }
 
-private:
-    template<typename _Tp, typename _VTp = std::decay_t<_Tp>>
-    using _Decay_if_not_any = std::enable_if_t<!std::is_same<_VTp, _any>::value, _VTp>;
+    /**
+     * Constructs an object of type any that contains an object of type T
+     * direct-initialized with std::forward<ValueType>(value). T shall satisfy the
+     * CopyConstructible requirements, otherwise the program is ill-formed. This is
+     * because an `any` may be copy constructed into another `any` at any time, so a copy
+     * should always be allowed.
+     */
+    template<
+        typename ValueType,
+        typename = typename std::enable_if<
+            !std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
+    any(ValueType&& value)
+    {
+        static_assert(
+            std::is_copy_constructible<typename std::decay<ValueType>::type>::value,
+            "T shall satisfy the CopyConstructible requirements.");
+        this->construct(std::forward<ValueType>(value));
+    }
 
-public:
-    /// @brief Default constructor
-    _any() noexcept : _M_manager(nullptr)
+    /**
+     * Has the same effect as any(rhs).swap(*this). No effects if an exception is thrown.
+     */
+    any& operator=(const any& rhs)
     {
+        any(rhs).swap(*this);
+        return *this;
     }
 
-    /// @brief Copy constructor
-    /// @param __other
-    _any(const _any& __other)
+    /**
+     * Has the same effect as any(std::move(rhs)).swap(*this).
+     * The state of *this is equivalent to the original state of rhs and rhs is left in a
+     * valid but otherwise unspecified state.
+     */
+    any& operator=(any&& rhs) noexcept
     {
-        if (!__other._M_manager)
-            _M_manager = nullptr;
-        else
+        any(std::move(rhs)).swap(*this);
+        return *this;
+    }
+
+    /**
+     * Has the same effect as any(std::forward<ValueType>(value)).swap(*this). No effect
+     * if an exception is thrown. T shall satisfy the CopyConstructible requirements,
+     * otherwise the program is ill-formed. This is because an `any` may be copy
+     * constructed into another `any` at any time, so a copy should always be allowed.
+     */
+    template<
+        typename ValueType,
+        typename = typename std::enable_if<
+            !std::is_same<typename std::decay<ValueType>::type, any>::value>::type>
+    any& operator=(ValueType&& value)
+    {
+        static_assert(
+            std::is_copy_constructible<typename std::decay<ValueType>::type>::value,
+            "T shall satisfy the CopyConstructible requirements.");
+        any(std::forward<ValueType>(value)).swap(*this);
+        return *this;
+    }
+
+    /**
+     * If not empty, destroys the contained object.
+     */
+    void reset() noexcept
+    {
+        if (has_value())
         {
-            _Arg __arg;
-            __arg._M_any = this;
-            __other._M_manager(_Op_clone, &__other, &__arg);
+            this->vtable->destroy(storage);
+            this->vtable = nullptr;
         }
     }
 
-    /// @brief Move constructor
-    /// @param __other
-    _any(_any&& __other)
+    /**
+     * Returns true if *this has no contained object, otherwise false.
+     */
+    bool has_value() const noexcept
     {
-        if (!__other._M_manager)
-            _M_manager = nullptr;
+        return this->vtable != nullptr;
+    }
+
+    /**
+     * If *this has a contained object of type T, typeid(T); otherwise typeid(void).
+     */
+    const std::type_info& type() const noexcept
+    {
+        return has_value() ? this->vtable->type() : typeid(void);
+    }
+
+    /**
+     * Exchange the states of *this and rhs.
+     */
+    void swap(any& other) noexcept
+    {
+        if (this->vtable != other.vtable)
+        {
+            any tmp(std::move(other));
+
+            other.vtable = this->vtable;
+            if (this->vtable != nullptr)
+                this->vtable->move(this->storage, other.storage);
+
+            this->vtable = tmp.vtable;
+            if (tmp.vtable != nullptr)
+            {
+                tmp.vtable->move(tmp.storage, this->storage);
+                tmp.vtable = nullptr;
+            }
+        }
         else
         {
-            _M_data = __other._M_data;
-            _M_manager = __other._M_manager;
-            const_cast<_any*>(&__other)->_M_manager = nullptr;
+            if (this->vtable != nullptr)
+                this->vtable->swap(this->storage, other.storage);
         }
     }
 
-    /// @brief By-value constructor.
-    /// @tparam T Data type.
-    /// @tparam VT Decayed data type.
-    /// @param value
-    template<
-        typename T,
-        typename VT = _Decay_if_not_any<T>,
-        std::enable_if_t<std::is_copy_constructible<VT>::value, bool> = true>
-    explicit _any(T&& value) :
-        _M_manager(&Manager<VT>::manage), _M_data(new VT{std::forward<T>(value)})
+private:
+    union storage_union
     {
-    }
+        using stack_storage_t = typename std::
+            aligned_storage<2 * sizeof(void*), std::alignment_of<void*>::value>::type;
 
-    ~_any()
+        void* dynamic;
+
+        stack_storage_t stack;
+    };
+
+    /**
+     * Base VTable specification.
+     *
+     * Note: The caller is responsible for doing .vtable = nullptr after destructive
+     * operations such as destroy() and/or move().
+     */
+    struct vtable_type
     {
-        if (_M_manager)
+        /**
+         * The type of the object this vtable is for.
+         */
+        const std::type_info& (*type)() noexcept;
+
+        /**
+         * Destroys the object in the union.
+         * The state of the union after this call is unspecified, caller must ensure not
+         * to use src anymore.
+         */
+        void (*destroy)(storage_union&) noexcept;
+
+        /**
+     * Copies the **inner** content of the src union into the yet uninitialized dest
+         * union. As such, both inner objects will have the same state, but on separate
+         * memory locations.
+         */
+        void (*copy)(const storage_union& src, storage_union& dest);
+
+        /**
+     * Moves the storage from src to the yet uninitialized dest union.
+         * The state of src after this call is unspecified, caller must ensure not to use
+         * src anymore.
+         */
+        void (*move)(storage_union& src, storage_union& dest) noexcept;
+
+        /**
+         * Exchanges the storage between lhs and rhs.
+         */
+        void (*swap)(storage_union& lhs, storage_union& rhs) noexcept;
+    };
+
+    /**
+     * VTable for dynamically allocated storage.
+     */
+    template<typename T> struct vtable_dynamic
+    {
+        static const std::type_info& type() noexcept
+        {
+            return typeid(T);
+        }
+
+        static void destroy(storage_union& storage) noexcept
+        {
+            delete reinterpret_cast<T*>(storage.dynamic);
+        }
+
+        static void copy(const storage_union& src, storage_union& dest)
+        {
+            dest.dynamic = new T(*reinterpret_cast<const T*>(src.dynamic));
+        }
+
+        static void move(storage_union& src, storage_union& dest) noexcept
+        {
+            dest.dynamic = src.dynamic;
+            src.dynamic = nullptr;
+        }
+
+        static void swap(storage_union& lhs, storage_union& rhs) noexcept
         {
-            _M_manager(_Op_destroy, this, nullptr);
-            _M_manager = nullptr;
+            std::swap(lhs.dynamic, rhs.dynamic);
         }
+    };
+
+    /**
+     * VTable for stack allocated storage.
+     */
+    template<typename T> struct vtable_stack
+    {
+        static const std::type_info& type() noexcept
+        {
+            return typeid(T);
+        }
+
+        static void destroy(storage_union& storage) noexcept
+        {
+            reinterpret_cast<T*>(&storage.stack)->~T();
+        }
+
+        static void copy(const storage_union& src, storage_union& dest)
+        {
+            new (&dest.stack) T(reinterpret_cast<const T&>(src.stack));
+        }
+
+        static void move(storage_union& src, storage_union& dest) noexcept
+        {
+            /**
+             * One of the conditions for using vtable_stack is a nothrow move constructor,
+             * so this move constructor will never throw an exception.
+             */
+            new (&dest.stack) T(std::move(reinterpret_cast<T&>(src.stack)));
+            destroy(src);
+        }
+
+        static void swap(storage_union& lhs, storage_union& rhs) noexcept
+        {
+            storage_union tmp_storage;
+            move(rhs, tmp_storage);
+            move(lhs, rhs);
+            move(tmp_storage, lhs);
+        }
+    };
+
+    /**
+     * Whether the type T must be dynamically allocated or can be stored on the stack.
+     */
+    template<typename T>
+    struct requires_allocation
+        : std::integral_constant<
+              bool,
+              !(std::is_nothrow_move_constructible<T>::value // N4562 6.3/3 [any.class]
+                && sizeof(T) <= sizeof(storage_union::stack)
+                && std::alignment_of<T>::value
+                       <= std::alignment_of<storage_union::stack_storage_t>::value)>
+    {
+    };
+
+    /**
+     * Returns the pointer to the vtable of the type T.
+     */
+    template<typename T> static vtable_type* vtable_for_type()
+    {
+        using VTableType = typename std::conditional<
+            requires_allocation<T>::value,
+            vtable_dynamic<T>,
+            vtable_stack<T>>::type;
+        static vtable_type table
+            = {VTableType::type,
+               VTableType::destroy,
+               VTableType::copy,
+               VTableType::move,
+               VTableType::swap};
+        return &table;
+    }
+
+protected:
+    template<typename T> friend const T* any_cast(const any* operand) noexcept;
+
+    template<typename T> friend T* any_cast(any* operand) noexcept;
+
+    /**
+     * Same effect as is_same(this->type(), t);
+     */
+    bool is_typed(const std::type_info& t) const
+    {
+        return is_same(this->type(), t);
     }
 
-    /// @brief Access type id of the value currently stored
-    /// @return
-    const std::type_info& type() const
+    /**
+     * Checks if two type infos are the same.
+     * If ANY_IMPL_FAST_TYPE_INFO_COMPARE is defined, checks only the address of the
+     * type infos, otherwise does an actual comparison. Checking addresses is
+     * only a valid approach when there's no interaction with outside sources
+     * (other shared libraries and such).
+     */
+    static bool is_same(const std::type_info& a, const std::type_info& b)
     {
-        if (!_M_manager)
-            return typeid(void);
-        _Arg __arg;
-        _M_manager(_Op_get_type_info, this, &__arg);
-        return *__arg._M_typeinfo;
+#ifdef ANY_IMPL_FAST_TYPE_INFO_COMPARE
+        return &a == &b;
+#else
+        return a == b;
+#endif
+    }
+
+    /**
+     * Casts (with no type_info checks) the storage pointer as const T*.
+     */
+    template<typename T> const T* cast() const noexcept
+    {
+        return requires_allocation<typename std::decay<T>::type>::value ?
+                   reinterpret_cast<const T*>(storage.dynamic) :
+                   reinterpret_cast<const T*>(&storage.stack);
+    }
+
+    /**
+     * Casts (with no type_info checks) the storage pointer as T*.
+     */
+    template<typename T> T* cast() noexcept
+    {
+        return requires_allocation<typename std::decay<T>::type>::value ?
+                   reinterpret_cast<T*>(storage.dynamic) :
+                   reinterpret_cast<T*>(&storage.stack);
+    }
+
+private:
+    storage_union storage; // On offset(0) so no padding for align
+
+    vtable_type* vtable;
+
+    template<typename ValueType, typename T>
+    typename std::enable_if<requires_allocation<T>::value>::type
+    do_construct(ValueType&& value)
+    {
+        storage.dynamic = new T(std::forward<ValueType>(value));
+    }
+
+    template<typename ValueType, typename T>
+    typename std::enable_if<!requires_allocation<T>::value>::type
+    do_construct(ValueType&& value)
+    {
+        new (&storage.stack) T(std::forward<ValueType>(value));
+    }
+
+    /**
+     * Chooses between stack and dynamic allocation for the type decay_t<ValueType>,
+     * assigns the correct vtable, and constructs the object on our storage.
+     */
+    template<typename ValueType> void construct(ValueType&& value)
+    {
+        using T = typename std::decay<ValueType>::type;
+
+        this->vtable = vtable_for_type<T>();
+
+        do_construct<ValueType, T>(std::forward<ValueType>(value));
     }
 };
 
 namespace detail
 {
-/// @note Conditions must inherite from std::false_type or std::true_type
-/// @note implementation from cppreference for std::disjunction
-/// @deprecated as soon as C++17 is supported
-/// @todo static_assert if conditions are not of a compliant type
-/// @internal
-template<typename... Conditions_P> struct disjunction : std::false_type
+template<typename ValueType>
+inline ValueType
+any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::true_type)
 {
-};
-template<typename Condition_T> struct disjunction<Condition_T> : Condition_T
+    return std::move(*p);
+}
+
+template<typename ValueType>
+inline ValueType
+any_cast_move_if_true(typename std::remove_reference<ValueType>::type* p, std::false_type)
 {
-};
-template<typename Condition_T, typename... Conditions_P>
-struct disjunction<Condition_T, Conditions_P...> : std::conditional_t<
-                                                       bool(Condition_T::value),
-                                                       Condition_T,
-                                                       disjunction<Conditions_P...>>
+    return *p;
+}
+} // namespace detail
+
+/**
+ * Performs *any_cast<add_const_t<remove_reference_t<ValueType>>>(&operand), or throws
+ * bad_any_cast on failure.
+ */
+template<typename ValueType> inline ValueType any_cast(const any& operand)
 {
-};
+    auto p = any_cast<
+        typename std::add_const<typename std::remove_reference<ValueType>::type>::type>(
+        &operand);
+    if (p == nullptr)
+        throw bad_any_cast();
+    return *p;
+}
 
-} // namespace detail
+/**
+ * Performs *any_cast<remove_reference_t<ValueType>>(&operand), or throws bad_any_cast on
+ * failure.
+ */
+template<typename ValueType> inline ValueType any_cast(any& operand)
+{
+    auto p = any_cast<typename std::remove_reference<ValueType>::type>(&operand);
+    if (p == nullptr)
+        throw bad_any_cast();
+    return *p;
+}
+
+/**
+ * If ANY_IMPL_ANYCAST_MOVEABLE is not defined, does as N4562 specifies:
+ *     Performs *any_cast<remove_reference_t<ValueType>>(&operand), or throws bad_any_cast
+ * on failure.
+ *
+ * If ANY_IMPL_ANYCAST_MOVEABLE is defined, does as LWG Defect 2509 specifies [1]:
+ *     If ValueType is MoveConstructible and isn't a lvalue reference, performs
+ *     std::move(*any_cast<remove_reference_t<ValueType>>(&operand)), otherwise
+ *     *any_cast<remove_reference_t<ValueType>>(&operand).
+ *     Throws bad_any_cast on failure.
+ *
+ *     [1] https://cplusplus.github.io/LWG/lwg-active.html#2509
+ */
+template<typename ValueType> inline ValueType any_cast(any&& operand)
+{
+#ifdef ANY_IMPL_ANY_CAST_MOVEABLE
+    using can_move = std::integral_constant<
+        bool,
+        std::is_move_constructible<ValueType>::value
+            && !std::is_lvalue_reference<ValueType>::value>;
+#else
+    using can_move = std::false_type;
+#endif
 
-/// @brief Access value stored in the object converted in the template type if possible.
-/// @tparam _ValueType
-/// @param __any
-/// @return Stored value.
-template<typename _ValueType> inline _ValueType any_cast(const _any& __any)
+    auto p = any_cast<typename std::remove_reference<ValueType>::type>(&operand);
+    if (p == nullptr)
+        throw bad_any_cast();
+    return detail::any_cast_move_if_true<ValueType>(p, can_move());
+}
+
+/**
+ * If operand != nullptr && operand->type() == typeid(ValueType), a pointer to the object
+ * contained by operand, otherwise nullptr.
+ */
+template<typename T> inline const T* any_cast(const any* operand) noexcept
+{
+    if (operand == nullptr || !operand->is_typed(typeid(T)))
+        return nullptr;
+    else
+        return operand->cast<T>();
+}
+
+/**
+ * If operand != nullptr && operand->type() == typeid(ValueType), a pointer to the object
+ * contained by operand, otherwise nullptr.
+ */
+template<typename T> inline T* any_cast(any* operand) noexcept
 {
-    using _Up = std::remove_cv_t<std::remove_reference_t<_ValueType>>;
-    assert(
-        (detail::disjunction<
-             std::is_reference<_ValueType>,
-             std::is_copy_constructible<_ValueType>>::value
-         && "Template argument must be a reference or CopyConstructible type"));
-    assert(
-        (std::is_constructible<_ValueType, const _Up&>::value
-         && "Template argument must be constructible from a const value."));
-    assert(std::is_object<_Up>::value);
-    assert(__any.type() == typeid(_Up));
-    auto __p = static_cast<_Up*>(__any._M_data);
-    if (__p)
-        return static_cast<_ValueType>(*__p);
-    throw std::bad_cast();
+    if (operand == nullptr || !operand->is_typed(typeid(T)))
+        return nullptr;
+    else
+        return operand->cast<T>();
 }
 
-#endif /* AIDGE_ANY_H_ */
\ No newline at end of file
+inline void swap(any& lhs, any& rhs) noexcept
+{
+    lhs.swap(rhs);
+}
+
+} // namespace libany
+
+#endif /* AIDGE_CORE_UTILS_ANY_H_ */
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..76875f15ff4229522e6208b0edb23ec519ff59ce
--- /dev/null
+++ b/include/aidge/utils/Attributes.hpp
@@ -0,0 +1,76 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_ATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_ATTRIBUTES_H_
+
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#endif
+#include <vector>
+#include <string>
+
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
+namespace {
+// This is the type that will hold all the strings. Each enumerate type will
+// declare its own specialization.
+template <typename T> struct EnumStrings {
+    static const char* const data[];
+};
+}
+
+namespace Aidge {
+template<class T, std::size_t N>
+constexpr std::size_t size(T (&)[N]) { return N; }
+
+/* This abstract class allows to avoid binding Attributes.
+*  Otherwise we would need to bind every template possible of Attributes.
+*  Every operators can access the methods of this class by inheriting from
+*  Attributes in the binding code.
+*/
+class Attributes {
+public:
+    /**
+     * @brief Check if the attribute exists.
+     * @param name Name of the attribute to check.
+     * @return bool True if the attribute exists, false otherwise.
+    */
+    virtual bool hasAttr(const std::string& name) const = 0;
+
+    /**
+     * @brief Get the (implementation defined) name of the type of an attribute, returned by std::type_info::name.
+     * @param name Name of the attribute.
+     * @return std::string Name of the type as returned by std::type_info::name.
+    */
+    virtual std::string getAttrType(const std::string& name) const = 0;
+
+    /**
+     * @brief Get the attribute's name list.
+     * @return std::set<std::string> Set of names of the attributes.
+    */
+    virtual std::set<std::string> getAttrsName() const = 0;
+
+#ifdef PYBIND
+    /* Bindable get function, does not require any templating.
+    *  This is thanks to py::object which allow the function to
+    *  be agnostic from its return type.
+    */
+    virtual py::object getAttrPy(const std::string& name) const = 0;
+#endif
+    virtual ~Attributes() {}
+};
+}
+
+#endif /* AIDGE_CORE_UTILS_ATTRIBUTES_H_ */
diff --git a/include/aidge/utils/CParameter.hpp b/include/aidge/utils/CParameter.hpp
deleted file mode 100644
index 7246bc3c7555c12402e864f62416b714052320d7..0000000000000000000000000000000000000000
--- a/include/aidge/utils/CParameter.hpp
+++ /dev/null
@@ -1,102 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPARAMETER_H_
-#define AIDGE_CPARAMETER_H_
-
-#include <map>
-#include <vector>
-#include <string>
-#include <type_traits>
-#include <typeinfo>
-#include <assert.h>
-
-#include "aidge/utils/Any.hpp"
-
-
-namespace Aidge {
-
-///\todo store also a fix-sized code that indicates the type
-///\todo managing complex types or excluding non-trivial, non-aggregate types
-class CParameter {
-private:
-    template<typename _ValueType>
-    inline _ValueType& any_cast_ref(const _any& __any)
-    {
-        using _Up =  std::remove_cv_t<std::remove_reference_t<_ValueType>>;
-        assert(((std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value) && "Template argument must be a reference or CopyConstructible type"));
-        assert((std::is_constructible<_ValueType, const _Up&>::value && "Template argument must be constructible from a const value."));
-        assert(std::is_object<_Up>::value);
-        assert(__any.type() == typeid(_Up));
-        if (_any::Manager<_Up>::access(&__any)) { // assess if _any object is empty
-            return *static_cast<_ValueType*>(_any::Manager<_Up>::access(&__any));
-        }
-        throw std::bad_cast();
-    }
-public:
-    CParameter() : m_Params({}){};
-    ~CParameter() = default;
-
-    /**
-     * \brief Returning a parameter identified by its name
-     * \tparam T expected parameter type
-     * \param i_ParamName Parameter name
-     * \details assert if T is not the actual parameter type, if the parameter does not
-     *  exist or interna parameter position is invalid.
-     * \todo Returning a T const& ? But dangerous => the client may get an address within
-     *  param buffer that will get invalid after the CParam death.
-     * \note at() throws if the parameter does not exist, using find to test for parameter existance
-     */
-    template<class T> T& Get(const std::string i_ParamName)
-    {
-        return any_cast_ref<T>(m_Buffer[m_Params.at(i_ParamName)]);
-    }
-
-    // template<class T> const T& Get(const std::string i_ParamName) const
-    // {
-    //     return any_cast<T>(m_Buffer[m_Params.at(i_ParamName)]);
-    // }
-
-    ///\brief Add a parameter value, identified by its name
-    ///\tparam T expected parameter type
-    ///\param i_ParamName Parameter name
-    ///\param i_Value Parameter value
-    ///\todo Pass i_Value by ref if large or not trivial
-    ///\bug If parameter already exists, its value is changed but written in the
-    /// internal buffer in a new location (previous value is still in memory at its previous location)
-    template<class T> void Add(const std::string &i_ParamName, T&& i_Value)
-    {
-        m_Params[i_ParamName] = m_Buffer.size(); // Copy pointer offset
-        m_Buffer.push_back(_any(std::forward<T>(i_Value)));
-    }
-
-
-    std::string getParamType(std::string const &i_ParamName){
-        return m_Buffer[m_Params.at(i_ParamName)].type().name();
-    }
-
-    std::vector<std::string> getParametersName(){
-        std::vector<std::string> parametersName;
-        for(auto const& it: m_Params)
-            parametersName.push_back(it.first);
-        return parametersName;
-    }
-
-private:
-    std::map<std::string, std::size_t> m_Params; // { Param name : offset }
-
-    ///\brief All raw pointers to parameters values concatenated. Use custom any class compatible with C++14.
-    std::vector<_any> m_Buffer = {};
-};
-
-}
-
-#endif /* AIDGE_CPARAMETER_H_ */
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..60f586edf947cef0e139049814263a29b4d01e24
--- /dev/null
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -0,0 +1,221 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_
+
+#include <map>
+#include <vector>
+#include <type_traits>
+#include <typeinfo>
+#include <cassert>
+#include <string>
+
+#include "aidge/utils/Any.hpp"
+#include "aidge/utils/Attributes.hpp"
+
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/embed.h>
+
+namespace py = pybind11;
+#endif
+
+
+namespace Aidge {
+
+///\todo store also a fix-sized code that indicates the type
+///\todo managing complex types or excluding non-trivial, non-aggregate types
+class DynamicAttributes : public Attributes {
+public:
+    /**
+     * \brief Returning an Attribute identified by its name
+     * \tparam T expected Attribute type
+     * \param name Attribute name
+     * \details assert if T is not the actual Attribute type or if the Attribute does not
+     *  exist
+     * \note at() throws if the Attribute does not exist, using find to test for Attribute existence
+     */
+    template<class T> T& getAttr(const std::string& name)
+    {
+#ifdef PYBIND
+        // If attribute does not exist in C++, it might have been created or modified in Python
+        auto it = mAttrs.find(name);
+        if (it == mAttrs.end()) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy != mAttrsPy.end()) {
+                // Insert the attribute back in C++
+                mAttrs.emplace(std::make_pair(name, libany::any(itPy->second.cast<T>())));
+            }
+        }
+#endif
+
+        return libany::any_cast<T&>(mAttrs.at(name));
+    }
+
+    template<class T> const T& getAttr(const std::string& name) const
+    {
+#ifdef PYBIND
+        // If attribute does not exist in C++, it might have been created or modified in Python
+        auto it = mAttrs.find(name);
+        if (it == mAttrs.end()) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy != mAttrsPy.end()) {
+                // Insert the attribute back in C++
+                mAttrs.emplace(std::make_pair(name, libany::any(itPy->second.cast<T>())));
+            }
+        }
+#endif
+
+        return libany::any_cast<const T&>(mAttrs.at(name));
+    }
+
+    ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template<class T> void addAttr(const std::string& name, const T& value)
+    {
+        const auto& res = mAttrs.emplace(std::make_pair(name, libany::any(value)));
+        assert(res.second && "attribute already exists");
+
+#ifdef PYBIND
+        // We cannot handle Python object if the Python interpreter is not running
+        if (Py_IsInitialized()) {
+            // Keep a copy of the attribute in py::object that is updated every time
+            mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+        }
+#endif
+    }
+
+    ///\brief Set an Attribute value, identified by its name. If it already exists, its value (and type, if different) is changed.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template<class T> void setAttr(const std::string& name, const T& value)
+    {
+        auto res = mAttrs.emplace(std::make_pair(name, libany::any(value)));
+        if (!res.second)
+            res.first->second = libany::any(value);
+
+#ifdef PYBIND
+        // We cannot handle Python object if the Python interpreter is not running
+        if (Py_IsInitialized()) {
+            // Keep a copy of the attribute in py::object that is updated every time
+            auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+            if (!resPy.second)
+                resPy.first->second = std::move(py::cast(value));
+        }
+#endif
+    }
+
+    void delAttr(const std::string& name) {
+        mAttrs.erase(name);
+#ifdef PYBIND
+        mAttrsPy.erase(name);
+#endif
+    }
+
+#ifdef PYBIND
+    void addAttrPy(const std::string& name, py::object&& value)
+    {
+        auto it = mAttrs.find(name);
+        assert(it == mAttrs.end() && "attribute already exists");
+
+        const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
+        assert(res.second && "attribute already exists");
+    }
+
+    void setAttrPy(const std::string& name, py::object&& value)
+    {
+        auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
+        if (!resPy.second)
+            resPy.first->second = std::move(value);
+
+        // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
+        mAttrs.erase(name);
+    }
+#endif
+
+    //////////////////////////////////////
+    ///     Generic Attributes API
+    //////////////////////////////////////
+    bool hasAttr(const std::string& name) const override final {
+#ifdef PYBIND
+        // Attributes might have been created in Python, the second condition is necessary.
+        return (mAttrs.find(name) != mAttrs.end() || mAttrsPy.find(name) != mAttrsPy.end());
+#else
+        return (mAttrs.find(name) != mAttrs.end());
+#endif
+    }
+
+    std::string getAttrType(const std::string& name) const override final {
+        // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
+        // - C-style for C++ created attributes
+        // - Python-style for Python created attributes
+#ifdef PYBIND
+        // If attribute does not exist in C++, it might have been created in Python
+        auto it = mAttrs.find(name);
+        if (it == mAttrs.end()) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy != mAttrsPy.end()) {
+                return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
+            }
+        }
+#endif
+
+        return mAttrs.at(name).type().name();
+    }
+
+    std::set<std::string> getAttrsName() const override final {
+        std::set<std::string> attrsName;
+        for(auto const& it: mAttrs)
+            attrsName.insert(it.first);
+#ifdef PYBIND
+        // Attributes might have been created in Python
+        for(auto const& it: mAttrsPy)
+            attrsName.insert(it.first);
+#endif
+        return attrsName;
+    }
+
+#ifdef PYBIND
+    /**
+     * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a
+     * generic type caster for std::any is not feasible.
+     * The strategy here is to keep a copy of each attribute in py::object that is updated every time.
+    */
+    py::object getAttrPy(const std::string& name) const {
+        return mAttrsPy.at(name);
+    };
+#endif
+
+private:
+#ifdef PYBIND
+    // Stores C++ attributes (copy) and Python-only attributes
+    // Code should be compiled with -fvisibility=hidden
+    // See https://pybind11.readthedocs.io/en/stable/faq.html:
+    // “‘SomeClass’ declared with greater visibility than the type of its 
+    // field ‘SomeClass::member’ [-Wattributes]”
+    // This map will only be populated if Python interpreter is running
+    std::map<std::string, py::object> mAttrsPy;
+    // Stores C++ attributes only
+    // mutable because it may be updated in getAttr() from Python
+    mutable std::map<std::string, libany::any> mAttrs;
+#else
+    std::map<std::string, libany::any> mAttrs;
+#endif
+};
+
+}
+
+#endif /* AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_ */
diff --git a/include/aidge/utils/Parameter.hpp b/include/aidge/utils/Parameter.hpp
deleted file mode 100644
index a475576170915182e25dbaa193ca8a7a3853c0e0..0000000000000000000000000000000000000000
--- a/include/aidge/utils/Parameter.hpp
+++ /dev/null
@@ -1,203 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CORE_UTILS_PARAMETER_H_
-#define AIDGE_CORE_UTILS_PARAMETER_H_
-
-#ifdef PYBIND
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include <string> // Add this inclue to print error
-#endif
-#include <tuple>
-#include <cassert>
-#include <cstddef>
-
-#ifdef PYBIND
-namespace py = pybind11;
-#endif
-
-namespace {
-// This is the type that will hold all the strings. Each enumerate type will
-// declare its own specialization.
-template <typename T> struct EnumStrings {
-    static const char* const data[];
-};
-}
-
-namespace Aidge {
-template<class T, std::size_t N>
-constexpr std::size_t size(T (&)[N]) { return N; }
-
-#ifdef PYBIND
-/* This abstract class allows to avoid binding Parametrizable.
-*  Otherwise we would need to bind every template possible of Parametrizable.
-*  Every operators can access the methods of this class by inheriting from
-*  PyAbstractParametrizable in the binding code.
-*/
-class PyAbstractParametrizable{
-    public:
-        /* Bindable get function, does not recquire any templating.
-        *  This is thanks to py::object which allow the function to
-        *  be agnostic from its return type.
-        */
-        virtual py::object getPy(const char* /*name*/) = 0;
-};
-#endif
-
-template <class PARAM_ENUM, class ...T>
-class Parameterizable
-#ifdef PYBIND
-    : public PyAbstractParametrizable
-#endif
-    {
-public:
-    using Parameters = std::tuple<T...>;
-
-    // Helper class to pass to the constructor
-    template <PARAM_ENUM paramEnum>
-    class param {
-    public:
-        constexpr param(const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& v) : value(v) {}
-        const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type value;
-    };
-
-/*
-    // Direct tuple initialization
-    Parameterizable(T... params) : mParams({params...}) {
-
-    }
-*/
-
-    // Constructor for parameters initialization.
-    // Compile-time garantee that every parameter is initialized.
-    template <PARAM_ENUM ...paramEnum> // non-type parameter pack
-    constexpr Parameterizable(const param<paramEnum>&&... params) {
-        // Check number of params consistency
-        static_assert(sizeof...(params) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in constructor");
-        // static_assert(size(EnumStrings<PARAM_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in enum string");
-
-        // Check no duplicates
-        constexpr std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { paramEnum... };
-        static_assert(!hasDuplicates(pe), "duplicate parameter"); // requires C++14
-
-        // Init params with constructor arguments
-        const std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(get<paramEnum>() = params.value), paramEnum) ... };
-        (void)p; // avoid unused warning
-    }
-
-    Parameterizable(const Parameterizable& params):
-        mParams(params.mParams)
-    {
-        // cpy-ctor (required for Operator cpy-ctor)
-    }
-
-    // Compile-time access with enum
-    template <PARAM_ENUM paramEnum>
-    constexpr typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() {
-        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
-    }
-
-    template <PARAM_ENUM paramEnum>
-    constexpr const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() const {
-        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
-    }
-
-    // Runtime access with enum
-    template <typename R>
-    constexpr R& get(PARAM_ENUM paramEnum) {
-        return get<R>(static_cast<std::size_t>(paramEnum));
-    }
-
-    template <typename R>
-    constexpr const R& get(PARAM_ENUM paramEnum) const {
-        return get<R>(static_cast<std::size_t>(paramEnum));
-    }
-
-    // Runtime existance check with name
-    constexpr bool isParam(const char* name) const {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    // Runtime access with name
-    template <typename R>
-    constexpr R& get(const char* name) {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                return get<R>(i);
-            }
-        }
-
-        assert(false && "parameter not found");
-    }
-
-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE > 0), R&>::type get(std::size_t i) {
-        if (i == SIZE) {
-            if (std::is_same<R, typename std::tuple_element<SIZE,std::tuple<T...>>::type>::value) {
-                return reinterpret_cast<R&>(std::get<SIZE>(mParams));
-            }
-            else {
-                assert(false && "wrong parameter type");
-            }
-        }
-        else {
-            return get<R, SIZE-1>(i);
-        }
-    }
-
-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE <= 0), R&>::type get(std::size_t i) {
-        assert(false && "parameter not found");
-    }
-
-    constexpr const std::tuple<T...>& getParams() const {
-        return mParams;
-    }
-
-    #ifdef PYBIND
-    py::object getPy(const char* name){
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
-                // Normal accessor would not work has we convert the tuple to a py::object which can be anything
-                return py::detail::accessor_policies::tuple_item::get(py::cast(mParams), static_cast<py::size_t>(i));
-            }
-        }
-        throw py::value_error("Parameter : " + std::string(name) + " does not exist." );
-    };
-    #endif
-
-private:
-    template <typename V, std::size_t N>
-    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
-        for (std::size_t i = 1; i < N; i++) {
-            for (std::size_t j = 0; j < i; j++) {
-                if (array[i] == array[j]) {
-                    return true;
-                }
-            }
-        }
-
-        return false;
-    }
-
-    std::tuple<T...> mParams;
-};
-}
-
-#endif /* AIDGE_CORE_UTILS_PARAMETER_H_ */
diff --git a/include/aidge/utils/Recipies.hpp b/include/aidge/utils/Recipies.hpp
index 4cbf8fd284bef314dbe28b19ebdae05172467bad..894e56fae2e9c2f6bcf11e4e76a433f5c8058080 100644
--- a/include/aidge/utils/Recipies.hpp
+++ b/include/aidge/utils/Recipies.hpp
@@ -17,11 +17,54 @@
 
 namespace Aidge{
 
+// FUSE MATMUL + ADD -> FC
+
+/**
+ * @brief Merge ``MatMul`` and :cpp:func:`Aidge::Add` Node into a :cpp:func:`Aidge::FC` Node.
+ *
+ * @param nodes Strict set of Node to merge.
+ */
 void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
+/**
+ * @brief Merge ``MatMul`` and :cpp:func:`Aidge::Add` Node into a :cpp:func:`Aidge::FC` Node.
+ *
+ * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ */
+void fuseMulAdd(std::shared_ptr<GraphView> graphView);
+
+
+// REMOVE FLATTEN + FC -> FC
+
+/**
+ * @brief Remove ``Flatten`` before :cpp:func:`Aidge::FC` Node.
+ *
+ * @param nodes Strict set of Node to merge.
+ */
 void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
+/**
+ * @brief Remove ``Flatten`` before :cpp:func:`Aidge::FC` Node.
+ *
+ * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ */
+void removeFlatten(std::shared_ptr<GraphView> graphView);
+ 
+// FUSE BN + FC || CONV -> FC || CONV
 
+/**
+ * @brief Fuse :cpp:func:`Aidge::BatchNorm` with :cpp:func:`Aidge::Conv` or :cpp:func:`Aidge::FC` Nodes.
+ * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
+ *
+ * @param nodes Strict set of Node to merge.
+ */
+void fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes);
+/**
+ * @brief Fuse :cpp:func:`Aidge::BatchNorm` with :cpp:func:`Aidge::Conv` or :cpp:func:`Aidge::FC` Nodes.
+ * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
+ *
+ * @param graphView Graph view to use graph matching on, in order to apply transfomrations.
+ */
+void fuseBatchNorm(std::shared_ptr<GraphView> graphView);
 
 }
 
-
-#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 98749c1349bad644dee2c1a8549559939791f71c..de543e95a16475c4443164af7be5c379d6554f8d 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -34,6 +34,7 @@ public:
     static std::map<Key, std::function<Func>>& registry()
     {
         #ifdef PYBIND
+        // NOTE(review): defining _CRT_SECURE_NO_WARNINGS here has no effect (it must precede CRT header inclusion); silence the MSVC getenv() warning via compile flags instead
         if (std::getenv("AIDGE_CORE_WITH_PYBIND")){
             std::string name = std::string("registrar_")+typeid(Registrable<DerivedClass, Key, Func>).name();
             static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fb800cffbcff5d4113961f8e62977417336f2cb8
--- /dev/null
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -0,0 +1,204 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
+
+#include <tuple>
+#include <cassert>
+#include <cstddef>
+#include <typeinfo>
+
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Utils.hpp"
+
+namespace Aidge {
+/**
+ * @brief This class is designed to handle static attributes (i.e. known at compile-time) 
+ * with named accessors, with minimal overhead (the name strings are not stored in each object 
+ * instance and it remains possible to access attribute without overhead at compile-time).
+*/
+template <class ATTRS_ENUM, class ...T>
+class StaticAttributes : public Attributes {
+public:
+    using Attrs = std::tuple<T...>;
+
+    // Helper class to pass to the constructor
+    template <ATTRS_ENUM attrsEnum>
+    class attr {
+    public:
+        constexpr attr(const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& v) : value(v) {}
+        const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type value;
+    };
+
+/*
+    // Direct tuple initialization
+    StaticAttributes(T... attrs) : mAttrs({attrs...}) {
+
+    }
+*/
+
+    // Constructor for Attributes initialization.
+    // Compile-time guarantee that every attribute is initialized.
+    template <ATTRS_ENUM ...attrsEnum> // non-type attribute pack
+    constexpr StaticAttributes(const attr<attrsEnum>&&... attrs) {
+        // Check number of attrs consistency
+        static_assert(sizeof...(attrs) == std::tuple_size<std::tuple<T...>>::value, "wrong number of attributes in constructor");
+        // static_assert(size(EnumStrings<ATTRS_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of attributes in enum string");
+
+        // Check no duplicates
+        constexpr std::array<ATTRS_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { attrsEnum... };
+        static_assert(!hasDuplicates(pe), "duplicate attribute"); // requires C++14
+
+        // Init attrs with constructor arguments
+        const std::array<ATTRS_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(getAttr<attrsEnum>() = attrs.value), attrsEnum) ... };
+        (void)p; // avoid unused warning
+    }
+
+    // Compile-time access with enum
+    template <ATTRS_ENUM attrsEnum>
+    constexpr typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& getAttr() {
+        return std::get<static_cast<std::size_t>(attrsEnum)>(mAttrs);
+    }
+
+    template <ATTRS_ENUM attrsEnum>
+    constexpr const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& getAttr() const {
+        return std::get<static_cast<std::size_t>(attrsEnum)>(mAttrs);
+    }
+
+    // Runtime access with enum
+    template <typename R>
+    constexpr R& getAttr(ATTRS_ENUM attrsEnum) {
+        return getAttr<R>(static_cast<std::size_t>(attrsEnum));
+    }
+
+    template <typename R>
+    constexpr const R& getAttr(ATTRS_ENUM attrsEnum) const {
+        return getAttr<R>(static_cast<std::size_t>(attrsEnum));
+    }
+
+    // Runtime access with name
+    template <typename R>
+    constexpr R& getAttr(const char* name) {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) {
+                return getAttr<R>(i);
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name);
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    constexpr typename std::enable_if<(SIZE > 0), R&>::type getAttr(std::size_t i) {
+        if (i == SIZE-1) {
+            if (std::is_same<R, typename std::tuple_element<SIZE-1,std::tuple<T...>>::type>::value) {
+                return reinterpret_cast<R&>(std::get<SIZE-1>(mAttrs));
+            }
+            else {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "wrong type for attribute with index %lu", i);
+            }
+        }
+        else {
+            return getAttr<R, SIZE-1>(i);
+        }
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    [[noreturn]] constexpr typename std::enable_if<(SIZE == 0), R&>::type getAttr(std::size_t /*i*/) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
+    }
+
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type getAttrType(std::size_t i) const {
+        if (i == SIZE-1) {
+            return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
+        }
+        else {
+            return getAttrType<SIZE-1>(i);
+        }
+    }
+
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
+    [[noreturn]] constexpr typename std::enable_if<(SIZE == 0), const std::type_info&>::type getAttrType(std::size_t /*i*/) const {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
+    }
+
+    constexpr const std::tuple<T...>& getStaticAttributes() const {
+        return mAttrs;
+    }
+
+    //////////////////////////////////////
+    ///     Generic Attributes API
+    //////////////////////////////////////
+    // Runtime existence check with name
+    constexpr bool hasAttr(const std::string& name) const override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    // Runtime type access with name
+    constexpr std::string getAttrType(const std::string& name) const override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return getAttrType(i).name();
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"%s\" not found", name.c_str());
+    }
+
+    std::set<std::string> getAttrsName() const override final {
+        std::set<std::string> attrsName;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
+        }
+        return attrsName;
+    }
+
+    #ifdef PYBIND
+    py::object getAttrPy(const std::string& name) const {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+                // Normal accessor would not work has we convert the tuple to a py::object which can be anything
+                return py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+            }
+        }
+
+        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
+    };
+    #endif
+
+private:
+    template <typename V, std::size_t N>
+    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
+        for (std::size_t i = 1; i < N; i++) {
+            for (std::size_t j = 0; j < i; j++) {
+                if (array[i] == array[j]) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    std::tuple<T...> mAttrs;
+};
+}
+
+#endif /* AIDGE_CORE_UTILS_STATICATTRIBUTES_H_ */
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6387619546c66922e48cf95a8a56487d4b0d0641
--- /dev/null
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -0,0 +1,53 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_TENSOR_UTILS_H_
+#define AIDGE_CORE_UTILS_TENSOR_UTILS_H_
+#include <cassert> // assert
+#include <cmath>   // std::abs
+#include "aidge/data/Tensor.hpp"
+
+/**
+ * @brief Compare two :cpp:class:`Aidge::Tensor` value wise. The comparison function is:
+ *
+ * |t1-t2| <= absolute + relative * |t2|
+ *
+ * If a tensor value is different from the other tensor return False
+ * If the tensor does not have the same size, return False
+ * If the datatype is not the same between each tensor return False
+ * If the templated type does not correspond to the datatype of each tensor, raise an assertion error
+ *
+ * @tparam T should correspond to the type of the tensor, define the type of the absolute and relative error
+ * @param t1  first :cpp:class:`Aidge::Tensor` to test
+ * @param t2  second :cpp:class:`Aidge::Tensor` to test
+ * @param relative relative difference allowed (should be between 0 and 1)
+ * @param absolute absolute error allowed (should be positive)
+ * @return true if both tensor are approximately equal and have the datatype, shape. Else return false
+ */
+template <typename T>
+bool approxEq(Aidge::Tensor t1, Aidge::Tensor t2, float relative, float absolute){
+    assert(t1.dataType() == t2.dataType());
+    assert(t1.dataType() == Aidge::NativeType<T>::type);
+    assert(relative >= 0);
+    assert(absolute >= 0 && relative <= 1);
+
+    if (t1.size() != t2.size()){
+        return false;
+    }
+    for(size_t i = 0; i < t1.size(); ++i){
+        if (static_cast<float>(std::abs(t1.get<T>(i) - t2.get<T>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T>(i)))))){
+            return false;
+        }
+    }
+    return true;
+}
+
+#endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_ */
diff --git a/include/aidge/utils/Utils.hpp b/include/aidge/utils/Utils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7c0c03c82ff252b6175d3c9bbe5395bb05127c9f
--- /dev/null
+++ b/include/aidge/utils/Utils.hpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef AIDGE_UTILS_H_
+#define AIDGE_UTILS_H_
+
+#include <cstdio>
+#include <cstdlib> // std::abort
+#include <memory>  // std::unique_ptr
+
+#ifdef NO_EXCEPTIONS
+#define AIDGE_THROW_OR_ABORT(ex, ...) \
+do { std::printf(__VA_ARGS__); std::abort(); } while (false)
+#else
+#include <stdexcept>
+#define AIDGE_THROW_OR_ABORT(ex, ...) \
+do { \
+    int n = 128; \
+    std::unique_ptr<char[]> formatted; \
+    formatted.reset(new char[n]); \
+    const int len = std::snprintf(formatted.get(), n, __VA_ARGS__); \
+    if (len >= n) { \
+        formatted.reset(new char[len + 1]); \
+        std::snprintf(formatted.get(), len + 1, __VA_ARGS__); \
+    }; \
+    throw ex(formatted.get()); \
+} while (false)
+#endif
+
+#endif //AIDGE_UTILS_H_
\ No newline at end of file
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 16f68907f47b7ecbf50e51555bdacfef58c8f2d9..44f8d95cdb39b8c4778136d87510a6358148bebc 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -33,28 +33,32 @@ void addCtor(py::class_<
                  std::tuple<std::string, DataType>,
                  detail::pimpl::ImplPtr_t(const Tensor&)>>& mTensor)
 {
-    mTensor.def(py::init([](py::array_t<T, py::array::c_style | py::array::forcecast> b) {
-        /* Request a buffer descriptor from Python */
-        py::buffer_info info = b.request();
-        Tensor* newTensor = new Tensor();
-        newTensor->setDatatype(NativeType<T>::type);
-        const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
-        newTensor->resize(dims);
-        // TODO : Find a better way to choose backend
-        std::set<std::string> availableBackends = Tensor::getAvailableBackends();
-        if (availableBackends.find("cpu") != availableBackends.end())
-        {
-            newTensor->setBackend("cpu");
-            newTensor->getImpl().setRawPtr(reinterpret_cast<Byte_t*>(info.ptr));
-        }
-        else
-        {
-            printf("Warning : Could not use aidge_cpu backend, verify you have "
-                   "`import aidge_cpu`\n");
-        }
+    mTensor
+        .def(py::init([](py::array_t<T, py::array::c_style | py::array::forcecast> b) {
+            /* Request a buffer descriptor from Python */
+            py::buffer_info info = b.request();
+            Tensor* newTensor = new Tensor();
+            newTensor->setDatatype(NativeType<T>::type);
+            const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
+            newTensor->resize(dims);
+            // TODO : Find a better way to choose backend
+            std::set<std::string> availableBackends = Tensor::getAvailableBackends();
+            if (availableBackends.find("cpu") != availableBackends.end())
+            {
+                newTensor->setBackend("cpu");
+                newTensor->getImpl().setRawPtr(reinterpret_cast<Byte_t*>(info.ptr));
+            }
+            else
+            {
+                printf("Warning : Could not use aidge_cpu backend, verify you have "
+                       "`import aidge_cpu`\n");
+            }
 
-        return newTensor;
-    }));
+            return newTensor;
+        }))
+        .def("__setitem__", (void (Tensor::*)(std::size_t, T)) & Tensor::set)
+        .def(
+            "__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) & Tensor::set);
 }
 
 void init_Tensor(py::module& m)
@@ -85,33 +89,48 @@ void init_Tensor(py::module& m)
         .def("dtype", &Tensor::dataType)
         .def("size", &Tensor::size)
         .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) & Tensor::resize)
-        .def("has_impl", &Tensor::HasImpl)
+        .def("has_impl", &Tensor::hasImpl)
+        .def("get_coord", &Tensor::getCoord)
+        .def("get_idx", &Tensor::getIdx)
         .def_static("get_available_backends", &Tensor::getAvailableBackends)
         .def("__str__", [](Tensor& b) { return b.toString(); })
         .def("__len__", [](Tensor& b) -> size_t { return b.size(); })
         .def(
             "__getitem__",
             [](Tensor& b, size_t idx) -> py::object {
-                // TODO : Should return error if backend not compatible with get
                 if (idx >= b.size())
                     throw py::index_error();
                 switch (b.dataType())
                 {
                     case DataType::Float64:
-                        return py::cast(
-                            reinterpret_cast<double const*>(b.getImpl().rawPtr())[idx]);
+                        return py::cast(b.get<double>(idx));
+                    case DataType::Float32:
+                        return py::cast(b.get<float>(idx));
+                    case DataType::Int32:
+                        return py::cast(b.get<int>(idx));
+                    default:
+                        return py::none();
+                }
+            })
+        .def(
+            "__getitem__",
+            [](Tensor& b, std::vector<size_t> coordIdx) -> py::object {
+                if (b.getIdx(coordIdx) >= b.size())
+                    throw py::index_error();
+                switch (b.dataType())
+                {
+                    case DataType::Float64:
+                        return py::cast(b.get<double>(coordIdx));
                     case DataType::Float32:
-                        return py::cast(
-                            reinterpret_cast<float const*>(b.getImpl().rawPtr())[idx]);
+                        return py::cast(b.get<float>(coordIdx));
                     case DataType::Int32:
-                        return py::cast(
-                            reinterpret_cast<int const*>(b.getImpl().rawPtr())[idx]);
+                        return py::cast(b.get<int>(coordIdx));
                     default:
                         return py::none();
                 }
             })
         .def_buffer([](Tensor& b) -> py::buffer_info {
-            TensorImpl& tensorImpl = b.getImpl();
+            const std::unique_ptr<TensorImpl>& tensorImpl = b.getImpl();
 
             std::vector<size_t> dims;
             std::vector<size_t> strides;
@@ -156,8 +175,8 @@ void init_Tensor(py::module& m)
     // convert the data to INT ! Need to find a way to avoid this !
     addCtor<int>(pyClassTensor);
     addCtor<float>(pyClassTensor);
-    // #if SIZE_MAX != 0xFFFFFFFF
+#if SIZE_MAX != 0xFFFFFFFF
     addCtor<double>(pyClassTensor);
-    // #endif
+#endif
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 3efcf7c5345bbc835aeaf6dcbc416769b8654439..ab8b4cf7b91d5eea2db5245a8c5122ab004b4766 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -12,7 +12,6 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/Add.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index ecbb743d33cc5750bc60aeed8e5207dcec0c23dc..372afebdd3e1626cd0af88e335b78ec7fd73a5f4 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -27,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, Attributes>(
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 70d9bce003033e1264ac39764271773fa84c760f..f43381fecc689a292e166c4da40ea0cb4842c9e6 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -14,7 +14,6 @@
 
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
@@ -22,7 +21,7 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, PyAbstractParametrizable>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 7e366305f287e958ea7500695c1f3285908017b1..0c09917d71e520227eed48705527adaf204857ee 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -26,7 +25,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, Attributes>(
     m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 8a81e7ba184536cbd535db24519495400bce6fdb..3f48c50f7ffdb44450c0e2a155d85dcbf9f73fd9 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -27,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, Attributes>(
     m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 3b4137c6f208f96d256c72300437cc978658b84f..4b9d61d082ebed4d426b41efa071d3943f83d231 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -12,7 +12,6 @@
 #include <pybind11/pybind11.h>
 
 #include "aidge/operator/FC.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
@@ -21,7 +20,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, PyAbstractParametrizable>(m, "FC_Op", py::multiple_inheritance());
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FC_Op", py::multiple_inheritance());
 
   m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index dfd2cfedec5aa291f11cf7c2a93d750c3d91145f..4cf4dae2234900722058d6555582c5b78900ab7d 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -21,50 +21,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator, DynamicAttributes>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
-    .def("get_parameter_type", &GenericOperator_Op::getParameterType)
-    .def("get_parameters_name", &GenericOperator_Op::getParametersName)
-    .def("add_parameter", &GenericOperator_Op::addParameter<bool>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<int>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<float>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::string>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<bool>>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<int>>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<float>>)
-    .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<std::string>>)
-    .def("get_parameter", [](GenericOperator_Op& self, std::string key) -> py::object {
-        /*
-        This getParameter method returns the good python type without having to have
-        prior knowledge of the parameter type.
-        */
-        py::object res = py::none();
-        std::string paramType = self.getParameterType(key);
-        if(paramType == typeid(int).name())
-            res = py::cast(self.getParameter<int>(key));
-        else if(paramType == typeid(float).name())
-            res = py::cast(self.getParameter<float>(key));
-        else if(paramType == typeid(bool).name())
-            res = py::cast(self.getParameter<bool>(key));
-        else if(paramType == typeid(std::string).name())
-            res = py::cast(self.getParameter<std::string>(key));
-        else if(paramType == typeid(std::vector<bool>).name())
-            res = py::cast(self.getParameter<std::vector<bool>>(key));
-        else if(paramType == typeid(std::vector<int>).name())
-            res = py::cast(self.getParameter<std::vector<int>>(key));
-        else if(paramType == typeid(std::vector<float>).name())
-            res = py::cast(self.getParameter<std::vector<float>>(key));
-        else if(paramType == typeid(std::vector<std::string>).name())
-            res = py::cast(self.getParameter<std::vector<std::string>>(key));
-        else {
-            throw py::key_error("Failed to convert parameter type " + key + ", this issue may come from typeid function which gave an unknown key : [" + paramType + "]. Please open an issue asking to add the support for this key.");
-        }
-        return res;
-    })
     .def_readonly_static("identity", &GenericOperator_Op::Identity)
     .def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
-    .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"))
-    ;
+    .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
 
     m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nbDataIn"), py::arg("nbIn"), py::arg("nbOut"),
           py::arg("name") = "");
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index c062d93f5c40fe46336fe34f6d1664f24da07732..cae8a88bab7b59189dfbc6528cd653f1c97cb73a 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -13,13 +13,12 @@
 
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, PyAbstractParametrizable>(m, "LeakyReLU_Op", py::multiple_inheritance());
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLU_Op", py::multiple_inheritance());
 
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index b6ae27289fabe1fe4dbeea60704a61373bc850cf..2f738550041bcdb1ae809d68fa24fdf5a72e9164 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -11,8 +11,7 @@
 
 #include <pybind11/pybind11.h>
 
-#include "aidge/operator/Matmul.hpp"
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/operator/MatMul.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
@@ -20,13 +19,13 @@
 namespace py = pybind11;
 namespace Aidge {
 
-void declare_Matmul(py::module &m) {
-  py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, PyAbstractParametrizable>(m, "Matmul_Op", py::multiple_inheritance());
+void declare_MatMul(py::module &m) {
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, Attributes>(m, "MatMul_Op", py::multiple_inheritance());
 
-  m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = "");
+  m.def("MatMul", &MatMul, py::arg("out_channels"), py::arg("name") = "");
 }
 
-void init_Matmul(py::module &m) {
-  declare_Matmul(m);
+void init_MatMul(py::module &m) {
+  declare_MatMul(m);
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 9bd951c446e080ff27b099527ac9bbc350646140..2efd18c816c2d588e574872b3d3776a3409dc4ba 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -16,7 +16,6 @@
 #include <vector>
 #include <array>
 
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/Operator.hpp"
@@ -27,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, Attributes>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index ea9880800059e8993996e67138f89419c165fc4f..1c62cd0adf6b8712073ec0674754ce7c8c2014a5 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -13,7 +13,6 @@
 #include <pybind11/stl.h>
 
 #include "aidge/utils/Types.h"
-#include "aidge/utils/Parameter.hpp"
 // #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -26,18 +25,19 @@ template <DimIdx_t DIM>
 void declare_Producer(py::module &m) {
     // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
-    
+
 }
 
 
 void init_Producer(py::module &m) {
     py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Operator>(
-        m, 
-        "ProducerOp", 
+        m,
+        "ProducerOp",
         py::multiple_inheritance())
-    .def("dims", &Producer_Op::dims);
+    .def("dims", &Producer_Op::dims)
+    .def("set_output_tensor", &Producer_Op::setOutputTensor);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
-    
+
     declare_Producer<1>(m);
     declare_Producer<2>(m);
     declare_Producer<3>(m);
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 78418d51a5c410cb56bb8421fd7f3dc6ec6d32db..d1287c0a928ae2ad27a839cec1c3d3955da65538 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -17,7 +17,7 @@ namespace Aidge {
 void init_Data(py::module&);
 void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
-void init_Parameterizable(py::module&);
+void init_Attributes(py::module&);
 void init_Operator(py::module&);
 
 void init_Add(py::module&);
@@ -28,7 +28,7 @@ void init_ConvDepthWise(py::module&);
 void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
-void init_Matmul(py::module&);
+void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
@@ -46,7 +46,7 @@ void init_GRegex(py::module&);
 void init_Recipies(py::module&);
 
 void init_Scheduler(py::module&);
-
+void init_TensorUtils(py::module&);
 
 void set_python_flag(){
     // Set an env variable to know if we run with ypthon or cpp
@@ -65,7 +65,7 @@ void init_Aidge(py::module& m){
     init_Connector(m);
 
     init_OperatorImpl(m);
-    init_Parameterizable(m);
+    init_Attributes(m);
     init_Operator(m);
     init_Add(m);
     init_AvgPooling(m);
@@ -75,7 +75,7 @@ void init_Aidge(py::module& m){
     init_FC(m);
     init_GenericOperator(m);
     init_LeakyReLU(m);
-    init_Matmul(m);
+    init_MatMul(m);
     init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
@@ -86,6 +86,7 @@ void init_Aidge(py::module& m){
     init_GRegex(m);
     init_Recipies(m);
     init_Scheduler(m);
+    init_TensorUtils(m);
 }
 
 PYBIND11_MODULE(aidge_core, m) {
diff --git a/python_binding/recipies/pybind_Recipies.cpp b/python_binding/recipies/pybind_Recipies.cpp
index b4147dcb4fb82dbfe9f5b4605604725c6945ece9..93c131ef7417135bfdbc657c5c809339430616ed 100644
--- a/python_binding/recipies/pybind_Recipies.cpp
+++ b/python_binding/recipies/pybind_Recipies.cpp
@@ -20,24 +20,51 @@ namespace py = pybind11;
 
 namespace Aidge {
 void init_Recipies(py::module &m) {
-  m.def("fuse_mul_add", &fuseMulAdd, py::arg("nodes"), R"mydelimiter(
-    Recipie to Fuse MatMul and Add operators into an `aidge.FC` operator.
-    
-    Parameters
-    ----------
+
+
+  m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
+    Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+
+    :param graph_view: Graph view on which we want to apply the recipie
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
+  m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+    Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+
     :param nodes: The MatMul and Add nodes to fuse.
-    :type nodes: list of `aidge.node`
+    :type nodes: list of :py:class:`aidge_core.Node`
+    )mydelimiter");
+
+  m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
+    Recipie to remove a flatten operator.
 
+    :param graph_view: Graph view on which we want to apply the recipie
+    :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
-  m.def("remove_flatten", &removeFlatten, py::arg("nodes"), R"mydelimiter(
+  m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
     Recipie to remove a flatten operator.
-    
-    Parameters
-    ----------
+
     :param nodes: The flatten operator to remove.
-    :type nodes: list of `aidge.node`
+    :type nodes: list of :py:class:`aidge_core.Node`
+    )mydelimiter");
+  m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+    Recipie to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
+    :param nodes: The MatMul and Add nodes to fuse.
+    :type nodes: list of :py:class:`aidge_core.Node`
+    )mydelimiter");
+
+  m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
+    Recipie to fuse BatchNorm operators into the preceding Conv operators.
+
+    :param graph_view: Graph view on which we want to apply the recipie
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
+  m.def("fuse_batchnorm", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseBatchNorm), py::arg("nodes"), R"mydelimiter(
+    Recipie to fuse a BatchNorm operator into the preceding Conv operator.
+
+    :param nodes: The BatchNorm and Conv nodes to fuse.
+    :type nodes: list of :py:class:`aidge_core.Node`
     )mydelimiter");
-  
 }
 } // namespace Aidge
diff --git a/python_binding/utils/pybind_Parameter.cpp b/python_binding/utils/pybind_Parameter.cpp
index 358316ea00413813d6d482a8a4601e69af3aa992..2957876f31ad0781a36905cef3a5ae88934b6a8a 100644
--- a/python_binding/utils/pybind_Parameter.cpp
+++ b/python_binding/utils/pybind_Parameter.cpp
@@ -1,12 +1,36 @@
 #include <pybind11/pybind11.h>
-#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
-void init_Parameterizable(py::module& m){
-    py::class_<PyAbstractParametrizable, std::shared_ptr<PyAbstractParametrizable>>(m, "PyAbstractParametrizable")
-    .def("get", &PyAbstractParametrizable::getPy, py::arg("name"))
-    ;
+DynamicAttributes test_DynamicAttributes_binding() {
+    DynamicAttributes attrs;
+    attrs.addAttr<int>("a", 42);
+    attrs.addAttr<std::string>("b", "test");
+    attrs.addAttr<std::vector<bool>>("c", {true, false, true});
+    return attrs;
 }
+
+double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
+    return attrs.getAttr<double>("d");
+}
+
+void init_Attributes(py::module& m){
+    py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
+    .def("has_attr", &Attributes::hasAttr, py::arg("name"))
+    .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
+    .def("get_attrs_name", &Attributes::getAttrsName)
+    .def("get_attr", &Attributes::getAttrPy, py::arg("name"));
+
+    py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
+    .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
+    .def("set_attr", &DynamicAttributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));
+
+    m.def("test_DynamicAttributes_binding", &test_DynamicAttributes_binding);
+    m.def("test_DynamicAttributes_binding_check", &test_DynamicAttributes_binding_check, py::arg("attrs"));
+}
+
 }
 
diff --git a/python_binding/utils/pybind_TensorUtils.cpp b/python_binding/utils/pybind_TensorUtils.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..78825a5f3b8d45f22f76c57bd780dc7019fbc123
--- /dev/null
+++ b/python_binding/utils/pybind_TensorUtils.cpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+
+#include "aidge/utils/TensorUtils.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+template<typename T>
+void addTensorUtilsFunction(py::module &m){
+    m.def("approx_eq",
+    & approxEq<T>,
+    py::arg("t1"),
+    py::arg("t2"),
+    py::arg("relative"),
+    py::arg("absolute"),
+    R"mydelimiter(
+        Compare two :cpp:class:`Aidge::Tensor` element-wise. The comparison function is:
+            |t1-t2| <= absolute + relative * |t2|
+
+        If any pair of corresponding values differs beyond this tolerance, return False.
+        If the two tensors do not have the same size, return False.
+        If the datatypes of the two tensors differ, return False.
+        If the templated type does not correspond to the datatype of each tensor, raise an assertion error
+
+        :param t1: first tensor to test
+        :type t1: :py:class:`aidge_core.Tensor`
+        :param t2: second tensor to test
+        :type t2: :py:class:`aidge_core.Tensor`
+        :param relative: relative difference allowed (should be between 0 and 1)
+        :type relative: float
+        :param absolute: absolute error allowed (should be positive)
+        :type absolute: float
+        )mydelimiter");
+}
+
+void init_TensorUtils(py::module &m) {
+    addTensorUtilsFunction<float>(m);
+    addTensorUtilsFunction<double>(m);
+    addTensorUtilsFunction<int>(m);
+    addTensorUtilsFunction<long>(m);
+}
+} // namespace Aidge
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index d1a58a0606d13e3a0e4132ea49246c1345495abb..13fa49279e4490a9d68ddffc47d431bfe933d2ec 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -36,7 +36,7 @@ bool Tensor::operator==(const Tensor &otherTensor) const noexcept
 /// @bug So far copy between different backend is not supported
 void Tensor::setBackend(const std::string &name)
 {
-    if (HasImpl())
+    if (hasImpl())
     {
         if (strcmp(mImpl->backend(), name.c_str()) != 0)
         {
@@ -85,6 +85,12 @@ Byte_t *Tensor::GetDataAddress() noexcept
     return mImpl->GetDataAddress();
 }
 
+/// @brief Getting the size of the stored data type in bytes.
+std::size_t Tensor::GetScalarSize() const noexcept
+{
+    return mImpl->GetScalarSize();
+}
+
 /// @brief Copy dimensions, datatype and data of another Tensor.
 Tensor &Tensor::operator=(const Tensor &t)
 {
@@ -107,7 +113,7 @@ Tensor &Tensor::operator=(const Tensor &t)
 /// wrong
 void Tensor::setDatatype(const DataType dt)
 {
-    if (HasImpl() && (dataType() != dt))
+    if (hasImpl() && (dataType() != dt))
     {
         mDataType = dt;
         detail::pimpl::ImplPtr_t newImpl
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index bbf895285e0e00d1132eb1f46c7e67a455d705d7..03b2a9adb439eb00d0ba59a13fead4f25d617b36 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -519,17 +519,17 @@ void Aidge::GraphView::link(std::string /*name1_inID*/,
   printf("Not implemented yet.\n");
 }
 
-void Aidge::GraphView::insertParent(NodePtr childNode, 
-                  NodePtr newParentNode, 
-                  IOIndex_t childInputTensorIdx, 
-                  IOIndex_t newParentInputTensorIdx, 
+void Aidge::GraphView::insertParent(NodePtr childNode,
+                  NodePtr newParentNode,
+                  IOIndex_t childInputTensorIdx,
+                  IOIndex_t newParentInputTensorIdx,
                   IOIndex_t newParentOutputTensorIdx){
   NodePtr currentParentNode = childNode->getParent(childInputTensorIdx);
   const IOIndex_t currentParentOutputTensorIdx = childNode->input(childInputTensorIdx).second;
-  // Remove child from current parent & current Parent from child 
+  // Remove child from current parent & current Parent from child
   currentParentNode->removeChild(childNode, currentParentOutputTensorIdx);
 
-  // Add child 
+  // Add child
   currentParentNode->addChild(newParentNode,currentParentOutputTensorIdx, newParentInputTensorIdx);
   newParentNode->addChild(childNode, newParentOutputTensorIdx, childInputTensorIdx);
 
@@ -542,9 +542,8 @@ bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
   assert(mNodes.size()>0 && "There must be at least one Node to replace");
 
   bool replacable;
-  std::shared_ptr<Node> previousInputNode;
-  std::shared_ptr<Node> newInputNode;
-  std::shared_ptr<Node> previousOutputNode;
+  std::shared_ptr<Node> previousInputNode = (*inputNodes().begin());
+  std::shared_ptr<Node> previousOutputNode = (*outputNodes().begin());
   std::shared_ptr<Node> newOutputNode;
 
   auto gNew = std::make_shared<GraphView>();
@@ -552,18 +551,15 @@ bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
 
   if (newNodes.empty()) {
     replacable = (outputNodes().size() == 1) &&
-                      (inputNodes().size() == 1) &&
-                      ((*outputNodes().begin())->nbOutputs() == 1) &&
-                      ((*inputNodes().begin())->nbInputs() == 1);
-    previousOutputNode = (*outputNodes().begin());
-    previousInputNode = (*inputNodes().begin());
+                 (inputNodes().size() == 1) &&
+                 ((*outputNodes().begin())->nbOutputs() == 1) &&
+                 ((*inputNodes().begin())->nbDataInputs() == 1);
     newOutputNode = previousInputNode->input(0).first;
   } else {
-    replacable = ((outputNodes().size() == gNew->outputNodes().size()) &&
-                     (outputNodes().size() == 1));
-    previousOutputNode = (*outputNodes().begin());
     newOutputNode = (*gNew->outputNodes().begin());
-    replacable = replacable && (previousOutputNode->nbOutputs() == newOutputNode->nbOutputs());
+    replacable = (outputNodes().size() == gNew->outputNodes().size()) &&
+                 (outputNodes().size() == 1) &&
+                 (previousOutputNode->nbOutputs() == newOutputNode->nbOutputs());
   }
 
   if (replacable) {
diff --git a/src/graphmatching/NodeRegex.cpp b/src/graphmatching/NodeRegex.cpp
index bbb116d1b12a31b491b26d2a64d04b416b61c6b7..9bf164f60255c17492e528b0f27dec8c53f74979 100644
--- a/src/graphmatching/NodeRegex.cpp
+++ b/src/graphmatching/NodeRegex.cpp
@@ -12,7 +12,7 @@
 #include "aidge/graphmatching/NodeRegex.hpp"
 
 
-// Verification done by the Parameter system
+// Verification done by the Attribute system
 
 
 // Version 1 - Only test the type of the node (no need for a lexer)
@@ -39,8 +39,8 @@ bool Aidge::NodeRegex::isA(std::string NodeType){
 /**bool NodeRegex::_is(string &Node_op){
     // Parsing the condition is done in the initialization of the NodeRegex
     
-    // assert parameters exist in the node with the parameter function isParam()
+    // assert attributes exist in the node with the attribute function hasAttr()
 
-    // get the parameters
+    // get the attributes
 
 }*/
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..83a373c663fdfe25fd8cfb35130649fe7491ab49
--- /dev/null
+++ b/src/recipies/FuseBatchNorm.cpp
@@ -0,0 +1,174 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/FC.hpp"
+#include <cassert>
+#include <memory>
+#include <set>
+#include <string>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Recipies.hpp"
+// Graph Regex
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
+using namespace Aidge;
+
+void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes)
+{
+    assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
+
+    // Assert the nodes types are correct to be fused
+    std::shared_ptr<Node> conv;
+    std::shared_ptr<Node> batchnorm;
+    for (const auto& element : nodes)
+    {
+        assert(
+            (element->type() == "Conv" || element->type() == "BatchNorm")
+            && "Wrong type for the nodes to replace");
+        if (element->type() == "Conv")
+        {
+            conv = element;
+        }
+        else if (element->type() == "BatchNorm")
+        {
+            batchnorm = element;
+        }
+    }
+    // TODO : check if batchnorm is the only child of the Conv or FC
+    std::shared_ptr<Tensor> scale
+        = batchnorm->input(1).first->getOperator()->getOutput(batchnorm->input(1).second);
+    std::shared_ptr<Tensor> shift
+        = batchnorm->input(2).first->getOperator()->getOutput(batchnorm->input(2).second);
+    std::shared_ptr<Tensor> b_mean
+        = batchnorm->input(3).first->getOperator()->getOutput(batchnorm->input(3).second);
+    std::shared_ptr<Tensor> b_var
+        = batchnorm->input(4).first->getOperator()->getOutput(batchnorm->input(4).second);
+
+    // TODO : Find a way to remove the template
+    const float epsilon
+        = std::static_pointer_cast<BatchNorm_Op<2>>(batchnorm->getOperator())
+              ->getAttr<float>("Epsilon");
+    DimSize_t convOutDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())
+                                ->getAttr<DimSize_t>("OutChannels");
+
+    assert(scale->size() == convOutDims);
+    assert(shift->size() == convOutDims);
+    assert(b_mean->size() == convOutDims);
+    assert(b_var->size() == convOutDims);
+    assert(epsilon > 0.0);
+    // TODO : no no_bias attribute ?
+    float meanVariance = 0.0;
+    unsigned int count = 0;
+
+    for (std::size_t output = 0; output < convOutDims; ++output)
+    {
+        // TODO : get() assumes the datatype is float
+        if (b_var->get<float>(output) > 1.0e-12)
+        {
+            meanVariance += b_var->get<float>(output);
+            ++count;
+        }
+        else
+        {
+            printf("Zero-variance: %s [%zu]\n", conv->name().c_str(), output);
+        }
+    }
+    if (count > 0)
+        meanVariance /= count;
+    else
+    {
+        printf("variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+    }
+
+    const DimSize_t channelsSize
+        = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())
+              ->getAttr<DimSize_t>("InChannels");
+
+    // TODO : suppose we have Conv2D ...
+    const std::array<DimSize_t, 2> kernelDims
+        = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())
+              ->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+
+    std::shared_ptr<Tensor> weight
+        = conv->input(1).first->getOperator()->getOutput(conv->input(1).second);
+    std::shared_ptr<Tensor> bias
+        = conv->input(2).first->getOperator()->getOutput(conv->input(2).second);
+
+    for (Coord_t output = 0; output < convOutDims; ++output)
+    {
+        // Corrected for zero-variance issue:
+        // "A Quantization-Friendly Separable Convolution for MobileNets"
+        // https://arxiv.org/pdf/1803.08607.pdf
+        // to help post-training quantization
+        const float factor = scale->get<float>(output)
+                             / std::sqrt(
+                                 epsilon
+                                 + ((b_var->get<float>(output) > 1.0e-12 || count == 0) ?
+                                        b_var->get<float>(output) :
+                                        meanVariance));
+        // Weights adjustments
+        for (Coord_t channel = 0; channel < channelsSize; ++channel)
+        {
+            // TODO : Suppose kerneldims = 2
+            for (Coord_t k0 = 0; k0 < kernelDims[0]; ++k0)
+            {
+                for (Coord_t k1 = 0; k1 < kernelDims[1]; ++k1)
+                {
+                    std::vector<Coord_t> currentIdx = {output, channel, k0, k1};
+                    // TODO : suppose weights are float
+                    float weightValue = weight->get<float>(currentIdx);
+                    weight->set<float>(
+                        currentIdx,
+                        weightValue * factor); // update the Conv weights in place
+                }
+            }
+        }
+
+        // TODO : check if noBias==true is set, then set biasValue to 0
+        float biasValue = bias->get<float>(output);
+
+        biasValue = shift->get<float>(output)
+                    + (biasValue - b_mean->get<float>(output)) * factor;
+
+        bias->set<float>(output, biasValue);
+    }
+    auto g = std::make_shared<GraphView>();
+    g->add(std::set<std::shared_ptr<Node>>(
+        {batchnorm,
+         batchnorm->input(1).first,
+         batchnorm->input(2).first,
+         batchnorm->input(3).first,
+         batchnorm->input(4).first}));
+    g->replaceWith({});
+}
+
+void Aidge::fuseBatchNorm(std::shared_ptr<GraphView> graphView)
+{
+    std::map<std::string, NodeRegex*> nodesRegex;
+    nodesRegex["BatchNorm"] = new NodeRegex("BatchNorm");
+    nodesRegex["Conv"] = new NodeRegex("Conv");
+    nodesRegex["FC"] = new NodeRegex("FC");
+
+    std::vector<std::string> seqRegex;
+    seqRegex.push_back("Conv -> BatchNorm;"); // TODO: Add (Conv | FC)
+    GRegex GReg(nodesRegex, seqRegex);
+    Match matches = GReg.match(graphView);
+    std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
+    for (size_t i = 0; i < matches.getNbMatch(); ++i)
+    {
+        fuseBatchNorm(matchNodes[i]);
+    }
+}
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index 561d25776a28f1aad8f8c943711887ec6661a10c..1de79890f9b597c4baff7427e01d7217f9695a44 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -20,21 +20,18 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/GenericOperator.hpp"
-
+// Graph Regex
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
 using namespace Aidge;
 
-/**
- * @brief Merge MatMul and Add Node into FC.
- * 
- * @param nodes Strict set of Node to merge.
- */
 void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
     // Fuse Mulmat & Add into FC
     // Inputs : old nodes (pointers on mul & add)
-    
+
     assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
     // Too bad we lose information on the type after matching, how to keep the information after matching (not only for the type) ?
-    
+
     // Step 0 : Assert the nodes types are correct to be fused
     std::shared_ptr<Node> add;
     std::shared_ptr<Node> matmul;
@@ -53,7 +50,7 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
     auto producer_add_bias = add->input(1);
     Tensor& bias_tensor = (producer_add_bias.first)->getOperator()->output(0);
 
-    // Instanciate FC  
+    // Instanciate FC
     //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
     std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias_tensor.dims()[0], false));
 
@@ -61,10 +58,12 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
     // link weights & bias
     if (matmul->getParent(1)==nullptr) {
         matmul->getParent(0)->addChild(fc, 0, 1);
+        printf("MatMul out[1] == nullptr !\n");
     } else {
+        printf("MatMul out[1] != nullptr !\n");
         if (matmul->getParent(0)!=nullptr)
             matmul->getParent(0)->addChild(fc, 0, 0);
-        matmul->getParent(1)->addChild(fc, 0, 1);
+        matmul->input(1).first->addChild(fc, 0, 1);
     }
     (producer_add_bias.first)->addChild(fc,0,2);
 
@@ -74,7 +73,22 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
         // Maybe create a central mechanism to update automatically all graph views rather than each node have graphview presence memory ?
     auto nodeToReplace = std::make_shared<GraphView>();
-    nodeToReplace->add(nodes);
+    nodeToReplace->add(nodes, false);
     nodeToReplace->replaceWith({fc});
 
-}
\ No newline at end of file
+}
+
+void Aidge::fuseMulAdd(std::shared_ptr<GraphView> graphView){
+
+    std::map<std::string,NodeRegex*> nodesRegex ;
+    nodesRegex["MatMul"] = new NodeRegex("MatMul");
+    nodesRegex["Add"] = new NodeRegex("Add");
+    std::vector<std::string> seqRegex;
+    seqRegex.push_back("MatMul -> Add;");
+    GRegex GReg(nodesRegex, seqRegex);
+    Match matches = GReg.match(graphView);
+    std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
+    for (size_t i = 0; i < matches.getNbMatch(); ++i) {
+        fuseMulAdd(matchNodes[i]);
+    }
+}
diff --git a/src/recipies/LabelGraph.cpp b/src/recipies/LabelGraph.cpp
index 7ac2cbf6ca65c7ecbced9596efb71c2052405984..369336f7981198f962d8ab949309005be9ac5eb9 100644
--- a/src/recipies/LabelGraph.cpp
+++ b/src/recipies/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->get<ConvParam::KernelDims>(), op->get<ConvParam::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvAttr::KernelDims>(), op->getAttr<ConvAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->get<ConvDepthWiseParam::KernelDims>(), op->get<ConvDepthWiseParam::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvDepthWiseAttr::KernelDims>(), op->getAttr<ConvDepthWiseAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->get<AvgPoolingParam::KernelDims>(), op->get<AvgPoolingParam::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<AvgPoolingAttr::KernelDims>(), op->getAttr<AvgPoolingAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
diff --git a/src/recipies/RemoveFlatten.cpp b/src/recipies/RemoveFlatten.cpp
index cc3c3324e40636a1edcbc73cdc4a9dcfeec8a026..9096c107ba505f5f18993a761273552408db721b 100644
--- a/src/recipies/RemoveFlatten.cpp
+++ b/src/recipies/RemoveFlatten.cpp
@@ -15,10 +15,38 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/utils/Recipies.hpp"
 
+// Graph Regex
+#include "aidge/graphmatching/GRegex.hpp"
+#include "aidge/graphmatching/NodeRegex.hpp"
+
+
 namespace Aidge {
     void removeFlatten(std::set<std::shared_ptr<Node>> nodes) {
+        assert(nodes.size() == 2 && "Wrong number of nodes to replace\n");
+        std::shared_ptr<Node> flatten;
+        for (const auto& element : nodes) {
+            assert((element->type() == "FC" || element->type() == "Flatten") && "Wrong type for the nodes to replace");
+            if (element->type() == "Flatten"){
+                flatten = element;
+            }
+        }
         auto g = std::make_shared<GraphView>();
-        g->add(std::set<std::shared_ptr<Node>>({nodes}));
+        // TODO : avoid using replace_with and use a remove method instead
+        g->add(std::set<std::shared_ptr<Node>>({flatten}));
         g->replaceWith({});
     }
-}
\ No newline at end of file
+
+    void removeFlatten(std::shared_ptr<GraphView> graphView){
+        std::map<std::string,NodeRegex*> nodesRegex ;
+        nodesRegex["Flatten"] = new NodeRegex("Flatten");
+        nodesRegex["FC"] = new NodeRegex("FC");
+        std::vector<std::string> seqRegex;
+        seqRegex.push_back("Flatten->FC;");
+        GRegex GReg(nodesRegex, seqRegex);
+        Match matches = GReg.match(graphView);
+        std::vector<std::set<std::shared_ptr<Node>>> matchNodes = matches.getMatchNodes();
+        for (size_t i = 0; i < matches.getNbMatch(); ++i) {
+            removeFlatten(matchNodes[i]);
+        }
+    }
+}
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index dc0768d2b6f7a1dd46fc0a8523b950011f7dcf5d..4dc8eb5c84ddb25546a32a672bdc84685a6f79f0 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -34,6 +34,11 @@ void drawProgressBar(double progress, int barWidth, const std::string& additiona
 }
 
 void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
+    // TODO: For loop on the list of node to run
+    // run sequentially every runnable consumer once
+    // TODO: handle memory allocation in scheduler
+    // TODO: optimize memory usage
+
     // setup initial producers list
     mComputationNumber = 0;
     std::set<std::shared_ptr<Node>> producers;
@@ -74,16 +79,16 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
                        "\n\t\tR/C:\t",
                        (consumer->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(consumer.get()))).c_str());
                 for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) {
-                    printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
+                    printf("%zu/%zu\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
-                printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
                 printf("\n\t\tP:\t");
                 for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
-                    printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                    printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
-                printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
                 printf("\n");
             }
             bool isRunnable = true;
@@ -123,13 +128,13 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
                     printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
-                printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
                 printf("\n\t\tP:\t");
                 for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
-                    printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+                    printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
-                printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
+                printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
                 printf("\n");
             }
             bool isStillConsumer = false;
@@ -180,35 +185,20 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
     mScheduling.clear();
 
     this->generateScheduling();
-
-    // TODO: For loop on the list of node to run
-    // run sequencially every runnable consumers once
-    // TODO: handle memory allocation in scheduler
-    // TODO: optimize memory usage
+    int cpt = 0;
     for (const auto& runnable : mStaticSchedule) {
-        bool computationOverForConsumer = true;
-        for (IOIndex_t parentIDi = 0; parentIDi < runnable->nbInputs(); ++parentIDi) {
-            if (runnable->getOperator()->getNbConsumedData(parentIDi) <
-                runnable->getOperator()->getNbRequiredData(parentIDi)) {
-                computationOverForConsumer = false;
-                break;
-            }
-        }
-        if (computationOverForConsumer) {
-            computationOver.insert(runnable);
-        }
-
         if (verbose)
             printf("run: %s\n",
                     (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
         else
-            drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(mComputationNumber), 50,
+            drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50,
                             (std::string("running ") + runnable->type() + "_" +
                                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
         const auto tStart = std::chrono::high_resolution_clock::now();
         runnable->forward();
         const auto tEnd = std::chrono::high_resolution_clock::now();
         mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+        cpt++;
     }
     if (!verbose) drawProgressBar(1.0, 50, "                                   ");
     printf("\n");
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 2208399897f586becca798eb469344af01dbab64..8d634cc3a105c423b54b6003f41204aeb1fc5335 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -17,72 +17,72 @@
 
 using namespace Aidge;
 
-TEST_CASE("[core/operators] GenericOp(add & get parameters)", "[Operator]") {
+TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        const char* key = "intParam";
-        Testop.addParameter(key, int(5));
-        int registeredVal = Testop.getParameter<int>(key);
+        const char* key = "intAttr";
+        Testop.addAttr(key, int(5));
+        int registeredVal = Testop.getAttr<int>(key);
         REQUIRE(registeredVal == 5);
     }
     SECTION("LONG") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         long value = 3;
-        const char* key = "longParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<long>(key) == value);
+        const char* key = "longAttr";
+        Testop.addAttr(key, value);
+        REQUIRE(Testop.getAttr<long>(key) == value);
     }
     SECTION("FLOAT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         float value = 2.0;
-        const char* key = "floatParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<float>(key) == value);
+        const char* key = "floatAttr";
+        Testop.addAttr(key, value);
+        REQUIRE(Testop.getAttr<float>(key) == value);
     }
      SECTION("VECTOR<BOOL>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<bool> value = {true, false, false, true, true};
         const char* key = "vect";
-        Testop.addParameter(key, value);
+        Testop.addAttr(key, value);
 
-        REQUIRE(Testop.getParameter<std::vector<bool>>(key).size() == value.size());
+        REQUIRE(Testop.getAttr<std::vector<bool>>(key).size() == value.size());
         for (std::size_t i=0; i < value.size(); ++i){
-            REQUIRE(Testop.getParameter<std::vector<bool>>(key)[i] == value[i]);
+            REQUIRE(Testop.getAttr<std::vector<bool>>(key)[i] == value[i]);
         }
     }
     SECTION("VECTOR<INT>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<int> value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
         const char* key = "vect";
-        Testop.addParameter(key, value);
+        Testop.addAttr(key, value);
 
-        REQUIRE(Testop.getParameter<std::vector<int>>(key).size() == value.size());
+        REQUIRE(Testop.getAttr<std::vector<int>>(key).size() == value.size());
         for (std::size_t i=0; i < value.size(); ++i){
-            REQUIRE(Testop.getParameter<std::vector<int>>(key)[i] == value[i]);
+            REQUIRE(Testop.getAttr<std::vector<int>>(key)[i] == value[i]);
         }
     }
     SECTION("MULTIPLE PARAMS") {
         /*
-        Goal : Test that the offsets are well done by adding different parameters with different size.
+        Goal : Test that the offsets are well done by adding different attributes with different size.
         */
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addParameter<long>("longParam", 3);
-        Testop.addParameter<float>("floatParam", 2.0);
-        Testop.addParameter<uint8_t>("uint8Param", 5);
-        Testop.addParameter<long long>("llParam", 10);
-        REQUIRE(Testop.getParameter<long>("longParam") == 3);
-        REQUIRE(Testop.getParameter<float>("floatParam") == 2.0);
-        REQUIRE(Testop.getParameter<uint8_t>("uint8Param") == 5);
-        REQUIRE(Testop.getParameter<long long>("llParam") == 10);
+        Testop.addAttr<long>("longAttr", 3);
+        Testop.addAttr<float>("floatAttr", 2.0);
+        Testop.addAttr<uint8_t>("uint8Attr", 5);
+        Testop.addAttr<long long>("llAttr", 10);
+        REQUIRE(Testop.getAttr<long>("longAttr") == 3);
+        REQUIRE(Testop.getAttr<float>("floatAttr") == 2.0);
+        REQUIRE(Testop.getAttr<uint8_t>("uint8Attr") == 5);
+        REQUIRE(Testop.getAttr<long long>("llAttr") == 10);
     }
 }
 
-TEST_CASE("[core/operator] GenericOp(type check)", "[.ass]") {
+TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
     SECTION("WRONG TYPE FOR GETTER") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addParameter<long>("longParam", 3);
+        Testop.addAttr<long>("longAttr", 3);
 
         // This line should raise a failled assert
-        REQUIRE_THROWS(Testop.getParameter<int>("longParameter"));
+        REQUIRE_THROWS(Testop.getAttr<int>("longAttribute"));
     }
 }
diff --git a/unit_tests/recipies/Test_FuseMulAdd.cpp b/unit_tests/recipies/Test_FuseMulAdd.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..da53642055a3146c71a211ad7816f21c9b92d6cd
--- /dev/null
+++ b/unit_tests/recipies/Test_FuseMulAdd.cpp
@@ -0,0 +1,77 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+// #include "aidge/backend/cpu/operator/AddImpl.hpp"
+// #include "aidge/backend/cpu/operator/ConvImpl.hpp"
+// #include "aidge/backend/cpu/operator/FCImpl.hpp"
+// #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Recipies.hpp"
+
+namespace Aidge {
+
+TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
+    // generate the original GraphView
+    auto matmul0 = MatMul(5, "matmul0");
+    auto add0 = Add<2>("add0");
+    auto matmul1 = MatMul(5, "matmul1");
+    auto add1 = Add<2>("add1");
+
+    auto b0 = Producer({5}, "B0");
+    auto w0 = Producer({5, 5}, "W0");
+    auto b1 = Producer({5}, "B1");
+    auto w1 = Producer({5,5},"W1");
+    auto input = Producer({2,5}, "input");
+
+    input->addChild(matmul0, 0, 0);
+    w0->addChild(matmul0, 0, 1);
+
+    matmul0->addChild(add0, 0, 0);
+    b0->addChild(add0, 0, 1);
+
+    add0->addChild(matmul1, 0, 0);
+    w1->addChild(matmul1, 0, 1);
+
+    matmul1->addChild(add1, 0, 0);
+    b1->addChild(add1, 0, 1);
+
+    auto g = std::make_shared<GraphView>();
+    g->add({matmul0, add0, matmul1, add1, b0, b1});
+
+    // Check original graph
+    REQUIRE(g->getNodes() ==
+            std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+    REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
+    REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
+    REQUIRE(((matmul1->getParent(0) == add0) && (matmul1->getParent(1) == w1)));
+    REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
+
+	// Transform GraphView inplace
+    fuseMulAdd(g);
+	g->save("bonjour");
+
+	// Check new GraphView
+	 std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+	REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+	REQUIRE(newNodes.size() == 6);
+	for (const auto& node : newNodes) {
+		REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
+	}
+}
+}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/utils/Test_StaticAttributes.cpp b/unit_tests/utils/Test_StaticAttributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..36c2e0454b415e1cb25cc3581016530a372b9e65
--- /dev/null
+++ b/unit_tests/utils/Test_StaticAttributes.cpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <string>
+#include <vector>
+
+#include "aidge/utils/StaticAttributes.hpp"
+
+using namespace Aidge;
+
+enum class TestAttr { a, b, c, d };
+
+namespace {
+template <>
+const char *const EnumStrings<TestAttr>::data[] = {
+    "a",
+    "b",
+    "c",
+    "d"
+};
+}
+
+using Attributes_ = StaticAttributes<TestAttr, int, float, std::string, std::vector<bool>>;
+template <TestAttr e>
+using attr = typename Attributes_::template attr<e>;
+
+TEST_CASE("[core/attributes] StaticAttribute") {
+    SECTION("TestAttr") {
+        StaticAttributes<TestAttr, int, float, std::string, std::vector<bool>> attrs(
+            attr<TestAttr::a>(42),
+            attr<TestAttr::b>(18.75),
+            attr<TestAttr::c>("test"),
+            attr<TestAttr::d>({true, false, true}));
+
+        REQUIRE(attrs.getAttr<int>("a") == 42);
+        REQUIRE_THROWS(attrs.getAttr<int>("inexistant"));
+    }
+}