From 3fff049fedecada7c34e204ba99795f2da9174c2 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Mon, 2 Oct 2023 11:28:32 +0200
Subject: [PATCH] Renamed parameter to attribute

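Rename the "Parameter" API to "Attribute" across aidge_core: the
Parameters / StaticParameters / DynamicParameters headers become
Attributes / StaticAttributes / DynamicAttributes, the per-operator
*Param enums become *Attr, and the accessors get<>() / add_parameter() /
get_parameter() are renamed to getAttr<>() / add_attr() / get_attr().
The Python bindings and unit tests are updated accordingly.

As a minimal before/after sketch of the Python-side change (based only
on the updated unit tests in this patch, no new API is introduced):

    import aidge_core

    # Conv2D(in_channels, out_channels, kernel_dims), as in test_parameters.py
    conv = aidge_core.Conv2D(3, 8, [2, 2]).get_operator()
    # before: conv.get_parameter("OutChannels")
    assert conv.get_attr("OutChannels") == 8
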
---
 .../unit_tests/test_operator_binding.py       |  32 +--
 aidge_core/unit_tests/test_parameters.py      |  16 +-
 include/aidge/aidge.hpp                       |   6 +-
 include/aidge/data/Data.hpp                   |   2 +-
 include/aidge/hook/hook.hpp                   |   2 +-
 include/aidge/operator/Add.hpp                |   2 +-
 include/aidge/operator/AvgPooling.hpp         |  34 +--
 include/aidge/operator/BatchNorm.hpp          |  22 +-
 include/aidge/operator/Conv.hpp               |  44 ++--
 include/aidge/operator/ConvDepthWise.hpp      |  44 ++--
 include/aidge/operator/FC.hpp                 |  28 +--
 include/aidge/operator/GenericOperator.hpp    |   6 +-
 include/aidge/operator/LeakyReLU.hpp          |  20 +-
 include/aidge/operator/Matmul.hpp             |  24 +--
 include/aidge/operator/MaxPooling.hpp         |  34 +--
 include/aidge/operator/MetaOperator.hpp       |   2 +-
 include/aidge/operator/Producer.hpp           |   4 +-
 include/aidge/operator/ReLU.hpp               |   2 +-
 include/aidge/operator/Scaling.hpp            |  20 +-
 include/aidge/operator/Softmax.hpp            |   2 +-
 .../utils/{Parameters.hpp => Attributes.hpp}  |  24 +--
 include/aidge/utils/DynamicAttributes.hpp     | 144 +++++++++++++
 include/aidge/utils/DynamicParameters.hpp     | 147 -------------
 include/aidge/utils/StaticAttributes.hpp      | 197 ++++++++++++++++++
 include/aidge/utils/StaticParameters.hpp      | 197 ------------------
 python_binding/operator/pybind_AvgPooling.cpp |   2 +-
 python_binding/operator/pybind_BatchNorm.cpp  |   2 +-
 python_binding/operator/pybind_Conv.cpp       |   2 +-
 .../operator/pybind_ConvDepthWise.cpp         |   2 +-
 python_binding/operator/pybind_FC.cpp         |   2 +-
 .../operator/pybind_GenericOperator.cpp       |   2 +-
 python_binding/operator/pybind_LeakyReLU.cpp  |   2 +-
 python_binding/operator/pybind_Matmul.cpp     |   2 +-
 python_binding/operator/pybind_MaxPooling.cpp |   2 +-
 python_binding/pybind_core.cpp                |   4 +-
 python_binding/utils/pybind_Parameter.cpp     |  34 +--
 src/graphmatching/NodeRegex.cpp               |   6 +-
 src/recipies/LabelGraph.cpp                   |   6 +-
 unit_tests/operator/Test_GenericOperator.cpp  |  54 ++---
 39 files changed, 587 insertions(+), 590 deletions(-)
 rename include/aidge/utils/{Parameters.hpp => Attributes.hpp} (65%)
 create mode 100644 include/aidge/utils/DynamicAttributes.hpp
 delete mode 100644 include/aidge/utils/DynamicParameters.hpp
 create mode 100644 include/aidge/utils/StaticAttributes.hpp
 delete mode 100644 include/aidge/utils/StaticParameters.hpp

diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index b326e0748..96544ecda 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -30,36 +30,36 @@ class test_operator_binding(unittest.TestCase):
         self.assertNotEqual(gop.name(), "")
 
     def test_param_bool(self):
-        self.generic_operator.add_parameter("bool", True)
-        self.assertEqual(self.generic_operator.get_parameter("bool"), True)
+        self.generic_operator.add_attr("bool", True)
+        self.assertEqual(self.generic_operator.get_attr("bool"), True)
 
     def test_param_int(self):
-        self.generic_operator.add_parameter("int", 1)
-        self.assertEqual(self.generic_operator.get_parameter("int"), 1)
+        self.generic_operator.add_attr("int", 1)
+        self.assertEqual(self.generic_operator.get_attr("int"), 1)
 
     def test_param_float(self):
-        self.generic_operator.add_parameter("float", 2.0)
-        self.assertEqual(self.generic_operator.get_parameter("float"), 2.0)
+        self.generic_operator.add_attr("float", 2.0)
+        self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
 
     def test_param_str(self):
-        self.generic_operator.add_parameter("str", "value")
-        self.assertEqual(self.generic_operator.get_parameter("str"), "value")
+        self.generic_operator.add_attr("str", "value")
+        self.assertEqual(self.generic_operator.get_attr("str"), "value")
 
     def test_param_l_int(self):
-        self.generic_operator.add_parameter("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
-        self.assertEqual(self.generic_operator.get_parameter("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
 
     def test_param_l_bool(self):
-        self.generic_operator.add_parameter("l_bool", [True, False, False, True])
-        self.assertEqual(self.generic_operator.get_parameter("l_bool"), [True, False, False, True])
+        self.generic_operator.add_attr("l_bool", [True, False, False, True])
+        self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
 
     def test_param_l_float(self):
-        self.generic_operator.add_parameter("l_float", [2.0, 1.0])
-        self.assertEqual(self.generic_operator.get_parameter("l_float"), [2.0, 1.0])
+        self.generic_operator.add_attr("l_float", [2.0, 1.0])
+        self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
 
     def test_param_l_str(self):
-        self.generic_operator.add_parameter("l_str", ["ok"])
-        self.assertEqual(self.generic_operator.get_parameter("l_str"), ["ok"])
+        self.generic_operator.add_attr("l_str", ["ok"])
+        self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
 
 if __name__ == '__main__':
     unittest.main()
\ No newline at end of file
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index 170d726db..10d7e4164 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -11,7 +11,7 @@ SPDX-License-Identifier: EPL-2.0
 import unittest
 import aidge_core
 
-class test_parameters(unittest.TestCase):
+class test_attributes(unittest.TestCase):
     """Very basic test to make sure the python APi is not broken.
     Can be remove in later stage of the developpement.
     """
@@ -27,21 +27,21 @@ class test_parameters(unittest.TestCase):
         out_channels = 8
         k_dims = [2, 2]
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
-        self.assertEqual(conv_op.get_parameter("InChannels"), in_channels)
-        self.assertEqual(conv_op.get_parameter("OutChannels"), out_channels)
-        self.assertEqual(conv_op.get_parameter("KernelDims"), k_dims)
+        self.assertEqual(conv_op.get_attr("InChannels"), in_channels)
+        self.assertEqual(conv_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
 
     def test_fc(self):
         out_channels = 8
         nb_bias = True
         fc_op = aidge_core.FC(out_channels, nb_bias).get_operator()
-        self.assertEqual(fc_op.get_parameter("OutChannels"), out_channels)
-        self.assertEqual(fc_op.get_parameter("NoBias"), nb_bias)
+        self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
+        self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_matmul(self):
         out_channels = 8
         matmul_op = aidge_core.Matmul(out_channels).get_operator()
-        self.assertEqual(matmul_op.get_parameter("OutChannels"), out_channels)
+        self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
 
     def test_producer_1D(self):
         dims = [5]
@@ -71,7 +71,7 @@ class test_parameters(unittest.TestCase):
     def test_leaky_relu(self):
         negative_slope = 0.25
         leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
-        self.assertEqual(leakyrelu_op.get_parameter("NegativeSlope"), negative_slope)
+        self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 0ae6d89ae..21c587818 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -42,9 +42,9 @@
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
-#include "aidge/utils/Parameters.hpp"
-#include "aidge/utils/StaticParameters.hpp"
-#include "aidge/utils/DynamicParameters.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Recipies.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 652ca1f7c..02f4df320 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_DATA_H_
 #define AIDGE_DATA_H_
 
-#include "aidge/utils/Parameters.hpp"
+#include "aidge/utils/Attributes.hpp"
 
 namespace Aidge {
 enum class DataType {
diff --git a/include/aidge/hook/hook.hpp b/include/aidge/hook/hook.hpp
index 661f78e57..28f7ef5cd 100644
--- a/include/aidge/hook/hook.hpp
+++ b/include/aidge/hook/hook.hpp
@@ -17,7 +17,7 @@
 #ifndef Hook_H_
 #define Hook_H_
 
-#include "aidge/utils/Parameters.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include <memory>
 
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 303092911..1e0f17e6d 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -48,7 +48,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Add_Op(const Add_Op<NUM>& op)
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 1028cf1ad..b29463c67 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class AvgPoolingParam { StrideDims, KernelDims, PaddingDims };
+enum class AvgPoolingAttr { StrideDims, KernelDims, PaddingDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public Operator,
                 public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
-                public StaticParameters<AvgPoolingParam,
+                public StaticAttributes<AvgPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, (DIM<<1) >> {
@@ -45,30 +45,30 @@ public:
 
     AvgPooling_Op() = delete;
 
-    using Parameters_ = StaticParameters<AvgPoolingParam,
+    using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1)> >;
-    template <AvgPoolingParam e>
-    using param = typename Parameters_::template param<e>;
+    template <AvgPoolingAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
         : Operator(Type),
-          Parameters_(param<AvgPoolingParam::StrideDims>(stride_dims),
-                           param<AvgPoolingParam::KernelDims>(kernel_dims),
-                           param<AvgPoolingParam::PaddingDims>(padding_dims)) {
+          Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
+                           attr<AvgPoolingAttr::KernelDims>(kernel_dims),
+                           attr<AvgPoolingAttr::PaddingDims>(padding_dims)) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     AvgPooling_Op(const AvgPooling_Op<DIM>& op)
         : Operator(Type),
-          Parameters_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -96,13 +96,13 @@ public:
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<AvgPoolingParam::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             std::floor(static_cast<float>(mInput->dims()[dim+2] -
-                                                                    this->template get<AvgPoolingParam::KernelDims>()[dim] +
-                                                                    this->template get<AvgPoolingParam::PaddingDims>()[dim] +
-                                                                    this->template get<AvgPoolingParam::PaddingDims>()[dim+DIM]) /
-                                            static_cast<float>(this->template get<AvgPoolingParam::StrideDims>()[dim])));
+                                                                    this->template getAttr<AvgPoolingAttr::KernelDims>()[dim] +
+                                                                    this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim] +
+                                                                    this->template getAttr<AvgPoolingAttr::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -189,7 +189,7 @@ inline std::shared_ptr<Node> AvgPooling(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::AvgPoolingParam>::data[] = {"StrideDims",
+const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                           "KernelDims", "PaddingDims"};
 }
 
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index c1e56fcec..b6ae18724 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class BatchNormParam { Epsilon, Momentum };
+enum class BatchNormAttr { Epsilon, Momentum };
 
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public Operator,
                 public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
-                public StaticParameters<BatchNormParam, float, float> {
+                public StaticAttributes<BatchNormAttr, float, float> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 5> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(),
@@ -44,25 +44,25 @@ public:
 
     BatchNorm_Op() = delete;
 
-    using Parameters_ = StaticParameters<BatchNormParam, float, float>;
-    template <BatchNormParam e>
-    using param = typename Parameters_::template param<e>;
+    using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
+    template <BatchNormAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
         : Operator(Type),
-          Parameters_(param<BatchNormParam::Epsilon>(epsilon),
-                           param<BatchNormParam::Momentum>(momentum)),
+          Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
+                           attr<BatchNormAttr::Momentum>(momentum)),
           mOutput(std::make_shared<Tensor>()) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     BatchNorm_Op(const BatchNorm_Op<DIM>& op)
         : Operator(Type),
-          Parameters_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -179,7 +179,7 @@ inline std::shared_ptr<Node> BatchNorm(const float epsilon = 1.0e-5F,
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::BatchNormParam>::data[] = { "Epsilon", "Momentum" };
+const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
 
 #endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index e2d25da74..12bd84ce6 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvParam { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, PaddingDims };
+enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, PaddingDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public Operator,
                 public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public StaticParameters<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
+                public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
                                        DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >> {
 public:
     // FIXME: change accessibility
@@ -44,10 +44,10 @@ public:
 
     Conv_Op() = delete;
 
-    using Parameters_ = StaticParameters<ConvParam, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
+    using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
                                              DimSize_t, DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >>;
-    template <ConvParam e>
-    using param = typename Parameters_::template param<e>;
+    template <ConvAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr Conv_Op(DimSize_t in_channels,
                       DimSize_t out_channels,
@@ -56,22 +56,22 @@ public:
                       const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                       const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameters_(param<ConvParam::StrideDims>(stride_dims),
-                           param<ConvParam::DilationDims>(dilation_dims),
-                           param<ConvParam::InChannels>(in_channels),
-                           param<ConvParam::OutChannels>(out_channels),
-                           param<ConvParam::KernelDims>(kernel_dims),
-                           param<ConvParam::PaddingDims>(padding_dims)) {
+          Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
+                           attr<ConvAttr::DilationDims>(dilation_dims),
+                           attr<ConvAttr::InChannels>(in_channels),
+                           attr<ConvAttr::OutChannels>(out_channels),
+                           attr<ConvAttr::KernelDims>(kernel_dims),
+                           attr<ConvAttr::PaddingDims>(padding_dims)) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Conv_Op(const Conv_Op<DIM>& op)
         : Operator(Type),
-          Parameters_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -111,19 +111,19 @@ public:
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<ConvParam::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template get<ConvParam::DilationDims>()[dim] *
-                                                       (this->template get<ConvParam::KernelDims>()[dim] - 1) +
+            for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+                const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                       (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                         floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
-                                                 this->template get<ConvParam::PaddingDims>()[dim] +
-                                                 this->template get<ConvParam::PaddingDims>()[dim+DIM]) /
-                              static_cast<float>(this->template get<ConvParam::StrideDims>()[dim])));
+                                                 this->template getAttr<ConvAttr::PaddingDims>()[dim] +
+                                                 this->template getAttr<ConvAttr::PaddingDims>()[dim+DIM]) /
+                              static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
             }
 
-            outputDims[1] = this->template get<ConvParam::OutChannels>();
+            outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
             outputDims[0] = mInputs[0]->dims()[0];
             mOutput->resize(outputDims);
         }
@@ -216,7 +216,7 @@ inline std::shared_ptr<Node> Conv(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "DilationDims", "InChannels", "OutChannels",
+const char *const EnumStrings<Aidge::ConvAttr>::data[] = {"StrideDims", "DilationDims", "InChannels", "OutChannels",
                                                           "KernelDims", "PaddingDims"};
 }
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 6731f6047..7a4db68ba 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseParam { StrideDims, DilationDims, Channels, KernelDims, PaddingDims };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, PaddingDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public Operator,
                 public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
-                public StaticParameters<ConvDepthWiseParam,
+                public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        DimSize_t,
@@ -48,35 +48,35 @@ class ConvDepthWise_Op : public Operator,
 
     ConvDepthWise_Op() = delete;
 
-    using Parameters_ = StaticParameters<ConvDepthWiseParam,
+    using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              DimSize_t,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1) >>;
-    template <ConvDepthWiseParam e>
-    using param = typename Parameters_::template param<e>;
+    template <ConvDepthWiseAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameters_(param<ConvDepthWiseParam::StrideDims>(stride_dims),
-                           param<ConvDepthWiseParam::DilationDims>(dilation_dims),
-                           param<ConvDepthWiseParam::Channels>(0),
-                           param<ConvDepthWiseParam::KernelDims>(kernel_dims),
-                           param<ConvDepthWiseParam::PaddingDims>(padding_dims)) {
+          Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
+                           attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+                           attr<ConvDepthWiseAttr::Channels>(0),
+                           attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
+                           attr<ConvDepthWiseAttr::PaddingDims>(padding_dims)) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
         : Operator(Type),
-          Parameters_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -103,19 +103,19 @@ class ConvDepthWise_Op : public Operator,
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<ConvDepthWiseParam::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template get<ConvDepthWiseParam::DilationDims>()[dim] *
-                                                       (this->template get<ConvDepthWiseParam::KernelDims>()[dim] - 1) +
+            for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+                const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                       (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
                                                1;
 
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                         floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent +
-                                                 this->template get<ConvDepthWiseParam::PaddingDims>()[dim] +
-                                                 this->template get<ConvDepthWiseParam::PaddingDims>()[dim+DIM]) /
-                              static_cast<float>(this->template get<ConvDepthWiseParam::StrideDims>()[dim])));
+                                                 this->template getAttr<ConvDepthWiseAttr::PaddingDims>()[dim] +
+                                                 this->template getAttr<ConvDepthWiseAttr::PaddingDims>()[dim+DIM]) /
+                              static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
             }
-            this->template get<ConvDepthWiseParam::Channels>() = mInputs[0]->dims()[1];
-            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template get<ConvDepthWiseParam::KernelDims>()));
+            this->template getAttr<ConvDepthWiseAttr::Channels>() = mInputs[0]->dims()[1];
+            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
             // if (mInputs[1]->empty()) {
             //     mInputs[1]->resize(weightDims);
             // }
@@ -212,7 +212,7 @@ inline std::shared_ptr<Node> ConvDepthWise(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseParam>::data[] = {"StrideDims", "DilationDims", "Channels",
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
                                                           "KernelDims", "PaddingDims"};
 }
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index a979427e9..127d39a8b 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -23,17 +23,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class FCParam { OutChannels, NoBias };
+enum class FCAttr { OutChannels, NoBias };
 
 class FC_Op : public Operator,
               public Registrable<FC_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const FC_Op &)>,
-              public StaticParameters<FCParam, DimSize_t, bool> {
+              public StaticAttributes<FCAttr, DimSize_t, bool> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
@@ -44,25 +44,25 @@ public:
 
     FC_Op() = delete;
 
-    using Parameters_ = StaticParameters<FCParam, DimSize_t, bool>;
-    template <FCParam e> using param = typename Parameters_::template param<e>;
+    using Attributes_ = StaticAttributes<FCAttr, DimSize_t, bool>;
+    template <FCAttr e> using attr = typename Attributes_::template attr<e>;
 
     FC_Op(DimSize_t out_channels, bool noBias)
             : Operator(Type),
-            Parameters_(
-                param<FCParam::OutChannels>(out_channels),
-                param<FCParam::NoBias>(noBias))
+            Attributes_(
+                attr<FCAttr::OutChannels>(out_channels),
+                attr<FCAttr::NoBias>(noBias))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
         : Operator(Type),
-          Parameters_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -82,7 +82,7 @@ public:
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         if (inputIdx == 2) {
-            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template get<FCParam::NoBias>()) == false ? static_cast<std::size_t>(this->template get<FCParam::OutChannels>()) : 0));
+            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0));
             assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
         }
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
@@ -93,9 +93,9 @@ public:
     void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {this->template get<FCParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
+            std::array<DimSize_t, 2> weightDims = {this->template getAttr<FCAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
             // <out_channels, batch>
-            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()};
+            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<FCAttr::OutChannels>()};
 
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
@@ -171,7 +171,7 @@ inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, con
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels",
+const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index ea29dce00..1e5186617 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -20,7 +20,7 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/DynamicParameters.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
@@ -28,7 +28,7 @@ namespace Aidge {
 class GenericOperator_Op
     : public Operator,
       public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
-      public DynamicParameters {
+      public DynamicAttributes {
    private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
@@ -54,7 +54,7 @@ class GenericOperator_Op
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op)
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index dc84616d7..c6ee01239 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -15,7 +15,7 @@
 #include <vector>
 #include <memory>
 
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
@@ -25,13 +25,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class LeakyReLUParam {
+enum class LeakyReLUAttr {
     NegativeSlope
 };
 
 class LeakyReLU_Op : public Operator,
     public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
-    public StaticParameters<LeakyReLUParam, float> {
+    public StaticAttributes<LeakyReLUAttr, float> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -42,24 +42,24 @@ public:
 
     LeakyReLU_Op() = delete;
 
-    using Parameters_ = StaticParameters<LeakyReLUParam, float>;
-    template <LeakyReLUParam e> using param = typename Parameters_::template param<e>;
+    using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
+    template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
 
     LeakyReLU_Op(float negativeSlope)
             : Operator(Type),
-            Parameters_(
-                param<LeakyReLUParam::NegativeSlope>(negativeSlope))
+            Attributes_(
+                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op)
         : Operator(Type),
-          Parameters_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -147,7 +147,7 @@ inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::st
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::LeakyReLUParam>::data[]
+const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
     = {"NegativeSlope"};
 }
 
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/Matmul.hpp
index e798fc835..c1fea8a17 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/Matmul.hpp
@@ -23,17 +23,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class MatmulParam { OutChannels };
+enum class MatmulAttr { OutChannels };
 
 class Matmul_Op : public Operator,
               public Registrable<Matmul_Op,
                                  std::string,
                                  std::unique_ptr<OperatorImpl>(const Matmul_Op &)>,
-              public StaticParameters<MatmulParam, DimSize_t> {
+              public StaticAttributes<MatmulAttr, DimSize_t> {
 public:
     std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
     const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
@@ -43,25 +43,25 @@ public:
 
     Matmul_Op() = delete;
 
-    using Parameters_ = StaticParameters<MatmulParam, DimSize_t>;
-    template <MatmulParam e> using param = typename Parameters_::template param<e>;
+    using Attributes_ = StaticAttributes<MatmulAttr, DimSize_t>;
+    template <MatmulAttr e> using attr = typename Attributes_::template attr<e>;
 
     Matmul_Op(DimSize_t out_channels)
             : Operator(Type),
-            Parameters_(
-                param<MatmulParam::OutChannels>(out_channels)),
+            Attributes_(
+                attr<MatmulAttr::OutChannels>(out_channels)),
             mOutput(std::make_shared<Tensor>())
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Matmul_Op(const Matmul_Op& op)
         : Operator(Type),
-          Parameters_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -86,9 +86,9 @@ public:
     void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             // <in_features**, out_channels>
-            std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template get<MatmulParam::OutChannels>()};
+            std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template getAttr<MatmulAttr::OutChannels>()};
             // <out_channels, batch>
-            std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()};
+            std::array<DimSize_t, 1> outputDims = {this->template getAttr<MatmulAttr::OutChannels>()};
 
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
@@ -161,7 +161,7 @@ inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& n
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
+const char *const EnumStrings<Aidge::MatmulAttr>::data[] = {"OutChannels"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 2e0e7f445..eae7e30df 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -21,17 +21,17 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
+enum class MaxPoolingAttr { StrideDims, KernelDims, PaddingDims };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public Operator,
                 public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
-                public StaticParameters<MaxPoolingParam,
+                public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, (DIM<<1) >> {
@@ -45,31 +45,31 @@ public:
 
     MaxPooling_Op() = delete;
 
-    using Parameters_ = StaticParameters<MaxPoolingParam,
+    using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1)> >;
-    template <MaxPoolingParam e>
-    using param = typename Parameters_::template param<e>;
+    template <MaxPoolingAttr e>
+    using attr = typename Attributes_::template attr<e>;
 
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
         : Operator(Type),
-          Parameters_(param<MaxPoolingParam::StrideDims>(stride_dims),
-                           param<MaxPoolingParam::KernelDims>(kernel_dims),
-                           param<MaxPoolingParam::PaddingDims>(padding_dims)),
+          Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
+                           attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+                           attr<MaxPoolingAttr::PaddingDims>(padding_dims)),
           mOutput(std::make_shared<Tensor>()) {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op)
         : Operator(Type),
-          Parameters_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -97,13 +97,13 @@ public:
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
-            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             std::floor(static_cast<float>(mInput->dims()[dim+2] -
-                                                                    this->template get<MaxPoolingParam::KernelDims>()[dim] +
-                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim] +
-                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
-                                            static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
+                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim] +
+                                                                    this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim] +
+                                                                    this->template getAttr<MaxPoolingAttr::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = mInput->dims()[1];
             outputDims[0] = mInput->dims()[0];
@@ -190,7 +190,7 @@ inline std::shared_ptr<Node> MaxPooling(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
+const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 9e12b1598..2cd5c2593 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -23,7 +23,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     MetaOperator(const MetaOperator& op)
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index a1f2707e2..fa621a6e9 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -19,7 +19,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
@@ -51,7 +51,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Producer_Op(const Producer_Op& op)
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index cebfa5718..433e353f0 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -43,7 +43,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     ReLU_Op(const ReLU_Op& op)
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 99543b887..0ea6ba39b 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -17,7 +17,7 @@
 
 
 
-#include "aidge/utils/StaticParameters.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
@@ -27,13 +27,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ScalingParam {
+enum class ScalingAttr {
     scalingFactor
 };
 
 class Scaling_Op : public Operator,
     public Registrable<Scaling_Op, std::string, std::unique_ptr<OperatorImpl>(const Scaling_Op&)>,
-    public StaticParameters<ScalingParam, float> {
+    public StaticAttributes<ScalingAttr, float> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -44,24 +44,24 @@ public:
 
     Scaling_Op() = delete;
 
-    using Parameters_ = StaticParameters<ScalingParam, float>;
-    template <ScalingParam e> using param = typename Parameters_::template param<e>;
+    using Attributes_ = StaticAttributes<ScalingAttr, float>;
+    template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
 
     Scaling_Op(float scalingFactor)
             : Operator(Type),
-            Parameters_(
-                param<ScalingParam::scalingFactor>(scalingFactor))
+            Attributes_(
+                attr<ScalingAttr::scalingFactor>(scalingFactor))
     {
         setDatatype(DataType::Float32);
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Scaling_Op(const Scaling_Op& op)
         : Operator(Type),
-          Parameters_(op),
+          Attributes_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
@@ -155,7 +155,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::ScalingParam>::data[]
+const char* const EnumStrings<Aidge::ScalingAttr>::data[]
     = {"scalingFactor"};
 }
 
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index ffaf0001f..898bae4c3 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -43,7 +43,7 @@ public:
     }
 
     /**
-     * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Softmax_Op(const Softmax_Op& op)
diff --git a/include/aidge/utils/Parameters.hpp b/include/aidge/utils/Attributes.hpp
similarity index 65%
rename from include/aidge/utils/Parameters.hpp
rename to include/aidge/utils/Attributes.hpp
index 3016c10b9..641e10415 100644
--- a/include/aidge/utils/Parameters.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CORE_UTILS_PARAMETERS_H_
-#define AIDGE_CORE_UTILS_PARAMETERS_H_
+#ifndef AIDGE_CORE_UTILS_ATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_ATTRIBUTES_H_
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -35,26 +35,26 @@ namespace Aidge {
 template<class T, std::size_t N>
 constexpr std::size_t size(T (&)[N]) { return N; }
 
-/* This abstract class allows to avoid binding Parametrizable.
-*  Otherwise we would need to bind every template possible of Parametrizable.
+/* This abstract class allows to avoid binding Attributes.
+*  Otherwise we would need to bind every template possible of Attributes.
 *  Every operators can access the methods of this class by inheriting from
-*  Parameters in the binding code.
+*  Attributes in the binding code.
 */
-class Parameters {
+class Attributes {
 public:
-    virtual bool isParameter(const std::string& name) const = 0;
-    virtual std::string getParameterType(const std::string& name) const = 0;
-    virtual std::vector<std::string> getParametersName() const = 0;
+    virtual bool hasAttr(const std::string& name) const = 0;
+    virtual std::string getAttrType(const std::string& name) const = 0;
+    virtual std::vector<std::string> getAttrsName() const = 0;
 
 #ifdef PYBIND
     /* Bindable get function, does not recquire any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from its return type.
     */
-    virtual py::object getPy(const std::string& name) const = 0;
+    virtual py::object getAttrPy(const std::string& name) const = 0;
 #endif
-    virtual ~Parameters() {}
+    virtual ~Attributes() {}
 };
 }
 
-#endif /* AIDGE_CORE_UTILS_PARAMETERS_H_ */
+#endif /* AIDGE_CORE_UTILS_ATTRIBUTES_H_ */
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
new file mode 100644
index 000000000..df38ffea4
--- /dev/null
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_
+
+#include <map>
+#include <vector>
+#include <type_traits>
+#include <typeinfo>
+#include <cassert>
+#include <string>
+
+#include "aidge/utils/Any.hpp"
+#include "aidge/utils/Attributes.hpp"
+
+
+namespace Aidge {
+
+///\todo also store a fixed-size code that indicates the type
+///\todo manage complex types or exclude non-trivial, non-aggregate types
+class DynamicAttributes : public Attributes {
+private:
+    template<typename _ValueType>
+    inline _ValueType& any_cast_ref(const _any& __any)
+    {
+        using _Up =  std::remove_cv_t<std::remove_reference_t<_ValueType>>;
+        assert(((std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value) && "Template argument must be a reference or CopyConstructible type"));
+        assert((std::is_constructible<_ValueType, const _Up&>::value && "Template argument must be constructible from a const value."));
+        assert(std::is_object<_Up>::value);
+        assert(__any.type() == typeid(_Up));
+        if (_any::Manager<_Up>::access(&__any)) { // check that the _any object is not empty
+            return *static_cast<_ValueType*>(_any::Manager<_Up>::access(&__any));
+        }
+        throw std::bad_cast();
+    }
+
+    template<typename _ValueType>
+    inline const _ValueType& any_cast_ref(const _any& __any) const
+    {
+        using _Up =  std::remove_cv_t<std::remove_reference_t<_ValueType>>;
+        assert(((std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value) && "Template argument must be a reference or CopyConstructible type"));
+        assert((std::is_constructible<_ValueType, const _Up&>::value && "Template argument must be constructible from a const value."));
+        assert(std::is_object<_Up>::value);
+        assert(__any.type() == typeid(_Up));
+        if (_any::Manager<_Up>::access(&__any)) { // check that the _any object is not empty
+            return *static_cast<const _ValueType*>(_any::Manager<_Up>::access(&__any));
+        }
+        throw std::bad_cast();
+    }
+public:
+    /**
+     * \brief Return the Attribute identified by its name
+     * \tparam T expected Attribute type
+     * \param name Attribute name
+     * \details asserts if T is not the actual Attribute type or if the Attribute does not
+     *  exist
+     * \note at() throws if the Attribute does not exist; use find() to test for Attribute existence
+     */
+    template<class T> T& getAttr(const std::string& name)
+    {
+        return any_cast_ref<T>(mAttrs.at(name));
+    }
+
+    template<class T> const T& getAttr(const std::string& name) const
+    {
+        return any_cast_ref<T>(mAttrs.at(name));
+    }
+
+    ///\brief Add an Attribute value, identified by its name
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    ///\todo Pass value by ref if large or not trivial
+    ///\bug If the Attribute already exists, the new value is silently ignored (emplace() does not overwrite)
+    template<class T> void addAttr(const std::string& name, T&& value)
+    {
+        mAttrs.emplace(std::make_pair(name, _any(std::forward<T>(value))));
+    }
+
+    //////////////////////////////////////
+    ///     Generic Attributes API
+    //////////////////////////////////////
+    bool hasAttr(const std::string& name) const override final {
+        return (mAttrs.find(name) != mAttrs.end());
+    }
+
+    std::string getAttrType(const std::string& name) const override final {
+        return mAttrs.at(name).type().name();
+    }
+
+    std::vector<std::string> getAttrsName() const override final {
+        std::vector<std::string> attrsName;
+        for(auto const& it: mAttrs)
+            attrsName.push_back(it.first);
+        return attrsName;
+    }
+
+    #ifdef PYBIND
+    /**
+     * @details See https://github.com/pybind/pybind11/issues/1590 as to why a
+     * generic type caster for std::any is not feasible.
+    */
+    py::object getAttrPy(const std::string& name) const {
+        py::object res = py::none();
+        const auto& attrType = mAttrs.at(name).type();
+        if(attrType == typeid(int))
+            res = py::cast(getAttr<int>(name));
+        else if(attrType == typeid(float))
+            res = py::cast(getAttr<float>(name));
+        else if(attrType == typeid(bool))
+            res = py::cast(getAttr<bool>(name));
+        else if(attrType == typeid(std::string))
+            res = py::cast(getAttr<std::string>(name));
+        else if(attrType == typeid(std::vector<bool>))
+            res = py::cast(getAttr<std::vector<bool>>(name));
+        else if(attrType == typeid(std::vector<int>))
+            res = py::cast(getAttr<std::vector<int>>(name));
+        else if(attrType == typeid(std::vector<float>))
+            res = py::cast(getAttr<std::vector<float>>(name));
+        else if(attrType == typeid(std::vector<std::string>))
+            res = py::cast(getAttr<std::vector<std::string>>(name));
+        else {
+            throw py::key_error("Failed to convert attribute " + name + ": typeid returned an unknown key [" + attrType.name() + "]. Please open an issue asking to add support for this key.");
+        }
+        return res;
+    };
+    #endif
+
+private:
+    std::map<std::string, _any> mAttrs;
+};
+
+}
+
+#endif /* AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_ */
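
A minimal usage sketch of the dynamic container above (attribute names and values are purely illustrative):

    #include <string>

    #include "aidge/utils/DynamicAttributes.hpp"

    void dynamicAttrsExample() {
        Aidge::DynamicAttributes attrs;
        attrs.addAttr<float>("scale", 2.0f);
        attrs.addAttr<std::string>("mode", std::string("nearest"));

        if (attrs.hasAttr("scale")) {
            attrs.getAttr<float>("scale") = 0.5f;  // getAttr() returns a mutable reference
        }
    }
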
diff --git a/include/aidge/utils/DynamicParameters.hpp b/include/aidge/utils/DynamicParameters.hpp
deleted file mode 100644
index 77a509ca4..000000000
--- a/include/aidge/utils/DynamicParameters.hpp
+++ /dev/null
@@ -1,147 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CORE_UTILS_DYNAMICPARAMETERS_H_
-#define AIDGE_CORE_UTILS_DYNAMICPARAMETERS_H_
-
-#include <map>
-#include <vector>
-#include <type_traits>
-#include <typeinfo>
-#include <cassert>
-#include <string>
-
-#include "aidge/utils/Any.hpp"
-#include "aidge/utils/Parameters.hpp"
-
-
-namespace Aidge {
-
-///\todo store also a fix-sized code that indicates the type
-///\todo managing complex types or excluding non-trivial, non-aggregate types
-class DynamicParameters : public Parameters {
-private:
-    template<typename _ValueType>
-    inline _ValueType& any_cast_ref(const _any& __any)
-    {
-        using _Up =  std::remove_cv_t<std::remove_reference_t<_ValueType>>;
-        assert(((std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value) && "Template argument must be a reference or CopyConstructible type"));
-        assert((std::is_constructible<_ValueType, const _Up&>::value && "Template argument must be constructible from a const value."));
-        assert(std::is_object<_Up>::value);
-        assert(__any.type() == typeid(_Up));
-        if (_any::Manager<_Up>::access(&__any)) { // assess if _any object is empty
-            return *static_cast<_ValueType*>(_any::Manager<_Up>::access(&__any));
-        }
-        throw std::bad_cast();
-    }
-
-    template<typename _ValueType>
-    inline const _ValueType& any_cast_ref(const _any& __any) const
-    {
-        using _Up =  std::remove_cv_t<std::remove_reference_t<_ValueType>>;
-        assert(((std::is_reference<_ValueType>::value || std::is_copy_constructible<_ValueType>::value) && "Template argument must be a reference or CopyConstructible type"));
-        assert((std::is_constructible<_ValueType, const _Up&>::value && "Template argument must be constructible from a const value."));
-        assert(std::is_object<_Up>::value);
-        assert(__any.type() == typeid(_Up));
-        if (_any::Manager<_Up>::access(&__any)) { // assess if _any object is empty
-            return *static_cast<const _ValueType*>(_any::Manager<_Up>::access(&__any));
-        }
-        throw std::bad_cast();
-    }
-public:
-    /**
-     * \brief Returning a parameter identified by its name
-     * \tparam T expected parameter type
-     * \param name Parameter name
-     * \details assert if T is not the actual parameter type, if the parameter does not
-     *  exist or interna parameter position is invalid.
-     * \todo Returning a T const& ? But dangerous => the client may get an address within
-     *  param buffer that will get invalid after the CParam death.
-     * \note at() throws if the parameter does not exist, using find to test for parameter existance
-     */
-    template<class T> T& getParameter(const std::string& name)
-    {
-        return any_cast_ref<T>(mParams.at(name));
-    }
-
-    template<class T> const T& getParameter(const std::string& name) const
-    {
-        return any_cast_ref<T>(mParams.at(name));
-    }
-
-    ///\brief Add a parameter value, identified by its name
-    ///\tparam T expected parameter type
-    ///\param name Parameter name
-    ///\param value Parameter value
-    ///\todo Pass value by ref if large or not trivial
-    ///\bug If parameter already exists, its value is changed but written in the
-    /// internal buffer in a new location (previous value is still in memory at its previous location)
-    template<class T> void addParameter(const std::string& name, T&& value)
-    {
-        mParams.emplace(std::make_pair(name, _any(std::forward<T>(value))));
-    }
-
-    //////////////////////////////////////
-    ///     Generic Parameters API
-    //////////////////////////////////////
-    bool isParameter(const std::string& name) const override final {
-        return (mParams.find(name) != mParams.end());
-    }
-
-    std::string getParameterType(const std::string& name) const override final {
-        return mParams.at(name).type().name();
-    }
-
-    std::vector<std::string> getParametersName() const override final {
-        std::vector<std::string> parametersName;
-        for(auto const& it: mParams)
-            parametersName.push_back(it.first);
-        return parametersName;
-    }
-
-    #ifdef PYBIND
-    /**
-     * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a
-     * generic type caster for std::any is not feasable.
-    */
-    py::object getPy(const std::string& name) const {
-        py::object res = py::none();
-        const auto& paramType = mParams.at(name).type();
-        if(paramType == typeid(int))
-            res = py::cast(getParameter<int>(name));
-        else if(paramType == typeid(float))
-            res = py::cast(getParameter<float>(name));
-        else if(paramType == typeid(bool))
-            res = py::cast(getParameter<bool>(name));
-        else if(paramType == typeid(std::string))
-            res = py::cast(getParameter<std::string>(name));
-        else if(paramType == typeid(std::vector<bool>))
-            res = py::cast(getParameter<std::vector<bool>>(name));
-        else if(paramType == typeid(std::vector<int>))
-            res = py::cast(getParameter<std::vector<int>>(name));
-        else if(paramType == typeid(std::vector<float>))
-            res = py::cast(getParameter<std::vector<float>>(name));
-        else if(paramType == typeid(std::vector<std::string>))
-            res = py::cast(getParameter<std::vector<std::string>>(name));
-        else {
-            throw py::key_error("Failed to convert parameter type " + name + ", this issue may come from typeid function which gave an unknown key : [" + paramType.name() + "]. Please open an issue asking to add the support for this key.");
-        }
-        return res;
-    };
-    #endif
-
-private:
-    std::map<std::string, _any> mParams;
-};
-
-}
-
-#endif /* AIDGE_CORE_UTILS_DYNAMICPARAMETERS_H_ */
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
new file mode 100644
index 000000000..cb34fc89d
--- /dev/null
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -0,0 +1,197 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
+#define AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
+
+#include <tuple>
+#include <cassert>
+#include <cstddef>
+
+#include "aidge/utils/Attributes.hpp"
+
+namespace Aidge {
+template <class ATTRS_ENUM, class ...T>
+class StaticAttributes : public Attributes {
+public:
+    using Attrs = std::tuple<T...>;
+
+    // Helper class to pass to the constructor
+    template <ATTRS_ENUM attrsEnum>
+    class attr {
+    public:
+        constexpr attr(const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& v) : value(v) {}
+        const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type value;
+    };
+
+/*
+    // Direct tuple initialization
+    StaticAttributes(T... attrs) : mAttrs({attrs...}) {
+
+    }
+*/
+
+    // Constructor for Attributes initialization.
+    // Compile-time guarantee that every attribute is initialized.
+    template <ATTRS_ENUM ...attrsEnum> // non-type attribute pack
+    constexpr StaticAttributes(const attr<attrsEnum>&&... attrs) {
+        // Check number of attrs consistency
+        static_assert(sizeof...(attrs) == std::tuple_size<std::tuple<T...>>::value, "wrong number of attributes in constructor");
+        // static_assert(size(EnumStrings<ATTRS_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of attributes in enum string");
+
+        // Check no duplicates
+        constexpr std::array<ATTRS_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { attrsEnum... };
+        static_assert(!hasDuplicates(pe), "duplicate attribute"); // requires C++14
+
+        // Init attrs with constructor arguments
+        const std::array<ATTRS_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(getAttr<attrsEnum>() = attrs.value), attrsEnum) ... };
+        (void)p; // avoid unused warning
+    }
+
+    // Compile-time access with enum
+    template <ATTRS_ENUM attrsEnum>
+    constexpr typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& getAttr() {
+        return std::get<static_cast<std::size_t>(attrsEnum)>(mAttrs);
+    }
+
+    template <ATTRS_ENUM attrsEnum>
+    constexpr const typename std::tuple_element<static_cast<std::size_t>(attrsEnum),std::tuple<T...>>::type& getAttr() const {
+        return std::get<static_cast<std::size_t>(attrsEnum)>(mAttrs);
+    }
+
+    // Runtime access with enum
+    template <typename R>
+    constexpr R& getAttr(ATTRS_ENUM attrsEnum) {
+        return getAttr<R>(static_cast<std::size_t>(attrsEnum));
+    }
+
+    template <typename R>
+    constexpr const R& getAttr(ATTRS_ENUM attrsEnum) const {
+        return getAttr<R>(static_cast<std::size_t>(attrsEnum));
+    }
+
+    // Runtime access with name
+    template <typename R>
+    constexpr R& getAttr(const char* name) {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) {
+                return getAttr<R>(i);
+            }
+        }
+
+        assert(false && "attribute not found");
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
+    constexpr typename std::enable_if<(SIZE > 0), R&>::type getAttr(std::size_t i) {
+        if (i == SIZE) {
+            if (std::is_same<R, typename std::tuple_element<SIZE,std::tuple<T...>>::type>::value) {
+                return reinterpret_cast<R&>(std::get<SIZE>(mAttrs));
+            }
+            else {
+                assert(false && "wrong attribute type");
+            }
+        }
+        else {
+            return getAttr<R, SIZE-1>(i);
+        }
+    }
+
+    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
+    [[noreturn]] constexpr typename std::enable_if<(SIZE <= 0), R&>::type getAttr(std::size_t /*i*/) {
+        assert(false && "attribute not found");
+    }
+
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
+    constexpr typename std::enable_if<(SIZE > 0), std::string>::type getAttrType(std::size_t i) const {
+        if (i == SIZE) {
+            return typeid(typename std::tuple_element<SIZE,std::tuple<T...>>::type).name();
+        }
+        else {
+            return getAttrType<SIZE-1>(i);
+        }
+    }
+
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
+    constexpr typename std::enable_if<(SIZE <= 0), std::string>::type getAttrType(std::size_t /*i*/) const {
+        assert(false && "attribute not found");
+        return std::string();  // useless, but makes MSVC happy
+    }
+
+    constexpr const std::tuple<T...>& getStaticAttributes() const {
+        return mAttrs;
+    }
+
+    //////////////////////////////////////
+    ///     Generic Attributes API
+    //////////////////////////////////////
+    // Runtime existence check with name
+    constexpr bool hasAttr(const std::string& name) const override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    // Runtime type access with name
+    constexpr std::string getAttrType(const std::string& name) const override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return getAttrType(i);
+            }
+        }
+
+        assert(false && "attribute not found");
+    }
+
+    std::vector<std::string> getAttrsName() const override final {
+        std::vector<std::string> attrsName;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            attrsName.push_back(EnumStrings<ATTRS_ENUM>::data[i]);
+        }
+        return attrsName;
+    }
+
+    #ifdef PYBIND
+    py::object getAttrPy(const std::string& name) const {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+                // The normal accessor would not work, as we convert the tuple to a py::object, which can be anything
+                return py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+            }
+        }
+        throw py::value_error("Attribute " + name + " does not exist.");
+    };
+    #endif
+
+private:
+    template <typename V, std::size_t N>
+    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
+        for (std::size_t i = 1; i < N; i++) {
+            for (std::size_t j = 0; j < i; j++) {
+                if (array[i] == array[j]) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }
+
+    std::tuple<T...> mAttrs;
+};
+}
+
+#endif /* AIDGE_CORE_UTILS_STATICATTRIBUTES_H_ */
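
A minimal usage sketch of the renamed compile-time API; the enum DummyAttr and its entries are purely illustrative, and the sketch assumes an EnumStrings<DummyAttr> specialization ({"Alpha", "Beta"}) is provided the same way the operators provide theirs, since the generic runtime accessors above rely on it.

    #include "aidge/utils/StaticAttributes.hpp"

    // Hypothetical attribute enum, for illustration only.
    enum class DummyAttr { Alpha, Beta };

    // Assumption: an EnumStrings<DummyAttr>::data specialization with
    // {"Alpha", "Beta"} is declared following the operators' convention.

    using DummyAttributes = Aidge::StaticAttributes<DummyAttr, float, int>;

    void staticAttrsExample() {
        DummyAttributes attrs(DummyAttributes::attr<DummyAttr::Alpha>(0.5f),
                              DummyAttributes::attr<DummyAttr::Beta>(3));

        float alpha = attrs.getAttr<DummyAttr::Alpha>();  // compile-time access by enum
        int beta    = attrs.getAttr<int>("Beta");         // runtime access by name
        (void)alpha; (void)beta;
    }
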
diff --git a/include/aidge/utils/StaticParameters.hpp b/include/aidge/utils/StaticParameters.hpp
deleted file mode 100644
index cb384fe84..000000000
--- a/include/aidge/utils/StaticParameters.hpp
+++ /dev/null
@@ -1,197 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CORE_UTILS_STATICPARAMETERS_H_
-#define AIDGE_CORE_UTILS_STATICPARAMETERS_H_
-
-#include <tuple>
-#include <cassert>
-#include <cstddef>
-
-#include "aidge/utils/Parameters.hpp"
-
-namespace Aidge {
-template <class PARAM_ENUM, class ...T>
-class StaticParameters : public Parameters {
-public:
-    using Params = std::tuple<T...>;
-
-    // Helper class to pass to the constructor
-    template <PARAM_ENUM paramEnum>
-    class param {
-    public:
-        constexpr param(const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& v) : value(v) {}
-        const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type value;
-    };
-
-/*
-    // Direct tuple initialization
-    StaticParameters(T... params) : mParams({params...}) {
-
-    }
-*/
-
-    // Constructor for parameters initialization.
-    // Compile-time garantee that every parameter is initialized.
-    template <PARAM_ENUM ...paramEnum> // non-type parameter pack
-    constexpr StaticParameters(const param<paramEnum>&&... params) {
-        // Check number of params consistency
-        static_assert(sizeof...(params) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in constructor");
-        // static_assert(size(EnumStrings<PARAM_ENUM>::data) == std::tuple_size<std::tuple<T...>>::value, "wrong number of parameters in enum string");
-
-        // Check no duplicates
-        constexpr std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> pe = { paramEnum... };
-        static_assert(!hasDuplicates(pe), "duplicate parameter"); // requires C++14
-
-        // Init params with constructor arguments
-        const std::array<PARAM_ENUM, std::tuple_size<std::tuple<T...>>::value> p = { ((void)(get<paramEnum>() = params.value), paramEnum) ... };
-        (void)p; // avoid unused warning
-    }
-
-    // Compile-time access with enum
-    template <PARAM_ENUM paramEnum>
-    constexpr typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() {
-        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
-    }
-
-    template <PARAM_ENUM paramEnum>
-    constexpr const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() const {
-        return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
-    }
-
-    // Runtime access with enum
-    template <typename R>
-    constexpr R& get(PARAM_ENUM paramEnum) {
-        return get<R>(static_cast<std::size_t>(paramEnum));
-    }
-
-    template <typename R>
-    constexpr const R& get(PARAM_ENUM paramEnum) const {
-        return get<R>(static_cast<std::size_t>(paramEnum));
-    }
-
-    // Runtime access with name
-    template <typename R>
-    constexpr R& get(const char* name) {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (strcmp(EnumStrings<PARAM_ENUM>::data[i], name) == 0) {
-                return get<R>(i);
-            }
-        }
-
-        assert(false && "parameter not found");
-    }
-
-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE > 0), R&>::type get(std::size_t i) {
-        if (i == SIZE) {
-            if (std::is_same<R, typename std::tuple_element<SIZE,std::tuple<T...>>::type>::value) {
-                return reinterpret_cast<R&>(std::get<SIZE>(mParams));
-            }
-            else {
-                assert(false && "wrong parameter type");
-            }
-        }
-        else {
-            return get<R, SIZE-1>(i);
-        }
-    }
-
-    template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    [[noreturn]] constexpr typename std::enable_if<(SIZE <= 0), R&>::type get(std::size_t /*i*/) {
-        assert(false && "parameter not found");
-    }
-
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE > 0), std::string>::type getType(std::size_t i) const {
-        if (i == SIZE) {
-            return typeid(typename std::tuple_element<SIZE,std::tuple<T...>>::type).name();
-        }
-        else {
-            return getType<SIZE-1>(i);
-        }
-    }
-
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value-1>
-    constexpr typename std::enable_if<(SIZE <= 0), std::string>::type getType(std::size_t /*i*/) const {
-        assert(false && "parameter not found");
-        return std::string();  // useless, but makes MSVC happy
-    }
-
-    constexpr const std::tuple<T...>& getStaticParameters() const {
-        return mParams;
-    }
-
-    //////////////////////////////////////
-    ///     Generic Parameters API
-    //////////////////////////////////////
-    // Runtime existance check with name
-    constexpr bool isParameter(const std::string& name) const override final {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (name == EnumStrings<PARAM_ENUM>::data[i]) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    // Runtime type access with name
-    constexpr std::string getParameterType(const std::string& name) const override final {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (name == EnumStrings<PARAM_ENUM>::data[i]) {
-                return getType(i);
-            }
-        }
-
-        assert(false && "parameter not found");
-    }
-
-    std::vector<std::string> getParametersName() const override final {
-        std::vector<std::string> parametersName;
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            parametersName.push_back(EnumStrings<PARAM_ENUM>::data[i]);
-        }
-        return parametersName;
-    }
-
-    #ifdef PYBIND
-    py::object getPy(const std::string& name) const {
-        for (std::size_t i = 0; i < size(EnumStrings<PARAM_ENUM>::data); ++i) {
-            if (name == EnumStrings<PARAM_ENUM>::data[i]) {
-                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
-                // Normal accessor would not work has we convert the tuple to a py::object which can be anything
-                return py::detail::accessor_policies::tuple_item::get(py::cast(mParams), static_cast<py::size_t>(i));
-            }
-        }
-        throw py::value_error("Parameter : " + name + " does not exist." );
-    };
-    #endif
-
-private:
-    template <typename V, std::size_t N>
-    static constexpr bool hasDuplicates(const std::array<V, N>& array) {
-        for (std::size_t i = 1; i < N; i++) {
-            for (std::size_t j = 0; j < i; j++) {
-                if (array[i] == array[j]) {
-                    return true;
-                }
-            }
-        }
-
-        return false;
-    }
-
-    std::tuple<T...> mParams;
-};
-}
-
-#endif /* AIDGE_CORE_UTILS_STATICPARAMETERS_H_ */
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 3d44ab90c..372afebdd 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, Parameters>(
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Operator, Attributes>(
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 146d3f10b..f43381fec 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -21,7 +21,7 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Parameters>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index af4a5d648..0c09917d7 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -25,7 +25,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, Parameters>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Operator, Attributes>(
     m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 333bad8bb..3f48c50f7 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, Parameters>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Operator, Attributes>(
     m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 963db0caa..4b9d61d08 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -20,7 +20,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Parameters>(m, "FC_Op", py::multiple_inheritance());
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FC_Op", py::multiple_inheritance());
 
   m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 9cb33f6ae..79974ef5c 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -20,7 +20,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator, DynamicParameters>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator, DynamicAttributes>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance());
     m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nbDataIn"), py::arg("nbIn"), py::arg("nbOut"),
           py::arg("name") = "");
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 0592a2869..cae8a88ba 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -18,7 +18,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Parameters>(m, "LeakyReLU_Op", py::multiple_inheritance());
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLU_Op", py::multiple_inheritance());
 
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 4ad18ff74..57d32a9a3 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -20,7 +20,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_Matmul(py::module &m) {
-  py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, Parameters>(m, "Matmul_Op", py::multiple_inheritance());
+  py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, Attributes>(m, "Matmul_Op", py::multiple_inheritance());
 
   m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 45daf075e..2efd18c81 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, Parameters>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, Attributes>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 293e33ff0..e2c2dd5b1 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -17,7 +17,7 @@ namespace Aidge {
 void init_Data(py::module&);
 void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
-void init_Parameters(py::module&);
+void init_Attributes(py::module&);
 void init_Operator(py::module&);
 
 void init_Add(py::module&);
@@ -65,7 +65,7 @@ void init_Aidge(py::module& m){
     init_Connector(m);
 
     init_OperatorImpl(m);
-    init_Parameters(m);
+    init_Attributes(m);
     init_Operator(m);
     init_Add(m);
     init_AvgPooling(m);
diff --git a/python_binding/utils/pybind_Parameter.cpp b/python_binding/utils/pybind_Parameter.cpp
index 909b3063b..fcd32c14d 100644
--- a/python_binding/utils/pybind_Parameter.cpp
+++ b/python_binding/utils/pybind_Parameter.cpp
@@ -1,25 +1,25 @@
 #include <pybind11/pybind11.h>
-#include "aidge/utils/Parameters.hpp"
-#include "aidge/utils/DynamicParameters.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
-void init_Parameters(py::module& m){
-    py::class_<Parameters, std::shared_ptr<Parameters>>(m, "Parameters")
-    .def("is_parameter", &Parameters::isParameter)
-    .def("get_parameter_type", &Parameters::getParameterType)
-    .def("get_parameters_name", &Parameters::getParametersName)
-    .def("get_parameter", &Parameters::getPy, py::arg("name"));
+void init_Attributes(py::module& m){
+    py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
+    .def("has_attr", &Attributes::hasAttr)
+    .def("get_attr_type", &Attributes::getAttrType)
+    .def("get_attrs_name", &Attributes::getAttrsName)
+    .def("get_attr", &Attributes::getAttrPy, py::arg("name"));
 
-    py::class_<DynamicParameters, std::shared_ptr<DynamicParameters>, Parameters>(m, "DynamicParameters")
-    .def("add_parameter", &DynamicParameters::addParameter<bool>)
-    .def("add_parameter", &DynamicParameters::addParameter<int>)
-    .def("add_parameter", &DynamicParameters::addParameter<float>)
-    .def("add_parameter", &DynamicParameters::addParameter<std::string>)
-    .def("add_parameter", &DynamicParameters::addParameter<std::vector<bool>>)
-    .def("add_parameter", &DynamicParameters::addParameter<std::vector<int>>)
-    .def("add_parameter", &DynamicParameters::addParameter<std::vector<float>>)
-    .def("add_parameter", &DynamicParameters::addParameter<std::vector<std::string>>);
+    py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
+    .def("add_attr", &DynamicAttributes::addAttr<bool>)
+    .def("add_attr", &DynamicAttributes::addAttr<int>)
+    .def("add_attr", &DynamicAttributes::addAttr<float>)
+    .def("add_attr", &DynamicAttributes::addAttr<std::string>)
+    .def("add_attr", &DynamicAttributes::addAttr<std::vector<bool>>)
+    .def("add_attr", &DynamicAttributes::addAttr<std::vector<int>>)
+    .def("add_attr", &DynamicAttributes::addAttr<std::vector<float>>)
+    .def("add_attr", &DynamicAttributes::addAttr<std::vector<std::string>>);
 }
 }
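
The renamed Python methods map one-to-one onto the C++ API. A rough equivalence sketch for an operator that inherits DynamicAttributes (the attribute name "scale" is hypothetical; the constructor arguments follow the unit tests below):

    #include "aidge/operator/GenericOperator.hpp"

    void bindingEquivalenceExample() {
        Aidge::GenericOperator_Op op("TestOp", 1, 1, 1);
        op.addAttr<float>("scale", 2.0f);          // Python: op.add_attr("scale", 2.0)
        bool present = op.hasAttr("scale");        // Python: op.has_attr("scale")
        float scale  = op.getAttr<float>("scale"); // Python: op.get_attr("scale")
        (void)present; (void)scale;
    }
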
 
diff --git a/src/graphmatching/NodeRegex.cpp b/src/graphmatching/NodeRegex.cpp
index 5fa984eb6..9bf164f60 100644
--- a/src/graphmatching/NodeRegex.cpp
+++ b/src/graphmatching/NodeRegex.cpp
@@ -12,7 +12,7 @@
 #include "aidge/graphmatching/NodeRegex.hpp"
 
 
-// Verification done by the Parameter system
+// Verification done by the Attribute system
 
 
 // Version 1 - Only test the type of the node (no need for a lexer)
@@ -39,8 +39,8 @@ bool Aidge::NodeRegex::isA(std::string NodeType){
 /**bool NodeRegex::_is(string &Node_op){
     // Parsing the condition is done in the initialization of the NodeRegex
     
-    // assert parameters exist in the node with the parameter function isParameter()
+    // assert that the attributes exist in the node using the attribute function hasAttr()
 
-    // get the parameters
+    // get the attributes
 
 }*/
diff --git a/src/recipies/LabelGraph.cpp b/src/recipies/LabelGraph.cpp
index 7ac2cbf6c..369336f79 100644
--- a/src/recipies/LabelGraph.cpp
+++ b/src/recipies/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->get<ConvParam::KernelDims>(), op->get<ConvParam::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvAttr::KernelDims>(), op->getAttr<ConvAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->get<ConvDepthWiseParam::KernelDims>(), op->get<ConvDepthWiseParam::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<ConvDepthWiseAttr::KernelDims>(), op->getAttr<ConvDepthWiseAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->get<AvgPoolingParam::KernelDims>(), op->get<AvgPoolingParam::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->getAttr<AvgPoolingAttr::KernelDims>(), op->getAttr<AvgPoolingAttr::StrideDims>());
         return std::make_shared<Node>(newOp, node->name());
     }
 
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 220839989..c7b447e91 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -17,72 +17,72 @@
 
 using namespace Aidge;
 
-TEST_CASE("[core/operators] GenericOp(add & get parameters)", "[Operator]") {
+TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        const char* key = "intParam";
-        Testop.addParameter(key, int(5));
-        int registeredVal = Testop.getParameter<int>(key);
+        const char* key = "intAttr";
+        Testop.addAttr(key, int(5));
+        int registeredVal = Testop.getAttr<int>(key);
         REQUIRE(registeredVal == 5);
     }
     SECTION("LONG") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         long value = 3;
-        const char* key = "longParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<long>(key) == value);
+        const char* key = "longAttr";
+        Testop.addAttr(key, value);
+        REQUIRE(Testop.getAttr<long>(key) == value);
     }
     SECTION("FLOAT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         float value = 2.0;
-        const char* key = "floatParam";
-        Testop.addParameter(key, value);
-        REQUIRE(Testop.getParameter<float>(key) == value);
+        const char* key = "floatAttr";
+        Testop.addAttr(key, value);
+        REQUIRE(Testop.getAttr<float>(key) == value);
     }
      SECTION("VECTOR<BOOL>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<bool> value = {true, false, false, true, true};
         const char* key = "vect";
-        Testop.addParameter(key, value);
+        Testop.addAttr(key, value);
 
-        REQUIRE(Testop.getParameter<std::vector<bool>>(key).size() == value.size());
+        REQUIRE(Testop.getAttr<std::vector<bool>>(key).size() == value.size());
         for (std::size_t i=0; i < value.size(); ++i){
-            REQUIRE(Testop.getParameter<std::vector<bool>>(key)[i] == value[i]);
+            REQUIRE(Testop.getAttr<std::vector<bool>>(key)[i] == value[i]);
         }
     }
     SECTION("VECTOR<INT>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<int> value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
         const char* key = "vect";
-        Testop.addParameter(key, value);
+        Testop.addAttr(key, value);
 
-        REQUIRE(Testop.getParameter<std::vector<int>>(key).size() == value.size());
+        REQUIRE(Testop.getAttr<std::vector<int>>(key).size() == value.size());
         for (std::size_t i=0; i < value.size(); ++i){
-            REQUIRE(Testop.getParameter<std::vector<int>>(key)[i] == value[i]);
+            REQUIRE(Testop.getAttr<std::vector<int>>(key)[i] == value[i]);
         }
     }
     SECTION("MULTIPLE PARAMS") {
         /*
-        Goal : Test that the offsets are well done by adding different parameters with different size.
+        Goal: check that offsets are handled correctly by adding attributes of different types and sizes.
         */
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addParameter<long>("longParam", 3);
-        Testop.addParameter<float>("floatParam", 2.0);
-        Testop.addParameter<uint8_t>("uint8Param", 5);
-        Testop.addParameter<long long>("llParam", 10);
-        REQUIRE(Testop.getParameter<long>("longParam") == 3);
-        REQUIRE(Testop.getParameter<float>("floatParam") == 2.0);
-        REQUIRE(Testop.getParameter<uint8_t>("uint8Param") == 5);
-        REQUIRE(Testop.getParameter<long long>("llParam") == 10);
+        Testop.addAttr<long>("longAttr", 3);
+        Testop.addAttr<float>("floatAttr", 2.0);
+        Testop.addAttr<uint8_t>("uint8Attr", 5);
+        Testop.addAttr<long long>("llAttr", 10);
+        REQUIRE(Testop.getAttr<long>("longAttr") == 3);
+        REQUIRE(Testop.getAttr<float>("floatAttr") == 2.0);
+        REQUIRE(Testop.getAttr<uint8_t>("uint8Attr") == 5);
+        REQUIRE(Testop.getAttr<long long>("llAttr") == 10);
     }
 }
 
 TEST_CASE("[core/operator] GenericOp(type check)", "[.ass]") {
     SECTION("WRONG TYPE FOR GETTER") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addParameter<long>("longParam", 3);
+        Testop.addAttr<long>("longAttr", 3);
 
         // This line should raise a failed assert
-        REQUIRE_THROWS(Testop.getParameter<int>("longParameter"));
+        REQUIRE_THROWS(Testop.getAttr<int>("longAttribute"));
     }
 }
-- 
GitLab