diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py
index c04019043a6cd7d1a0de95a0ba32f2bb7a3a4bec..d082e9726b7ca33fbe6f4692bf7b55930b69cb9d 100644
--- a/aidge_core/aidge_export_aidge/operator_export/producer.py
+++ b/aidge_core/aidge_export_aidge/operator_export/producer.py
@@ -1,22 +1,22 @@
 from aidge_core.aidge_export_aidge.utils import operator_register
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import DataType, ExportNode, generate_file, generate_str
+from aidge_core import dtype, ExportNode, generate_file, generate_str
 import numpy as np
 from pathlib import Path
 
 # Convert aidge datatype to C++ type
 datatype_converter = {
-    DataType.Float64 : "double",
-    DataType.Float32 : "float",
-    DataType.Float16 : "half_float::half",
-    DataType.Int8    : "int8_t",
-    DataType.Int16   : "int16_t",
-    DataType.Int32   : "int32_t",
-    DataType.Int64   : "int64_t",
-    DataType.UInt8   : "uint8_t",
-    DataType.UInt16  : "uint16_t",
-    DataType.UInt32  : "uint32_t",
-    DataType.UInt64  : "uint64_t"
+    dtype.float64 : "double",
+    dtype.float32 : "float",
+    dtype.float16 : "half_float::half",
+    dtype.int8    : "int8_t",
+    dtype.int16   : "int16_t",
+    dtype.int32   : "int32_t",
+    dtype.int64   : "int64_t",
+    dtype.uint8   : "uint8_t",
+    dtype.uint16  : "uint16_t",
+    dtype.uint32  : "uint32_t",
+    dtype.uint64  : "uint64_t"
 }
 
 
diff --git a/aidge_core/export_utils/node_export.py b/aidge_core/export_utils/node_export.py
index 7262e9a837424158b8896f305894dcc57769520c..80c37dd0a54d57561ce1a872ea540461aeec30a0 100644
--- a/aidge_core/export_utils/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -20,10 +20,7 @@ class ExportNode(ABC):
         self.node = aidge_node
         self.operator = aidge_node.get_operator()
         self.name = self.node.name()
-        self.attributes = {} # Attributes are auto fetched from aidge operators
-        if isinstance(self.operator, aidge_core.Attributes):
-            for attr_name in self.operator.get_attrs_name():
-                self.attributes[attr_name] = self.operator.get_attr(attr_name)
+        self.attributes = self.operator.attr.dict() if self.operator.attr is not None else {} # Attributes are automatically fetched from aidge operators
 
         # rename is_leaf ?
         self.is_last = len(self.node.get_children()) == 0
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 164aee726255e0478b629ee853d9a1f619945f3a..5b25eb7975d439816dbf91cc95b462f217fd0227 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -30,42 +30,39 @@ class test_operator_binding(unittest.TestCase):
         self.assertNotEqual(gop.name(), "")
 
     def test_param_bool(self):
-        self.generic_operator.add_attr("bool", True)
-        self.assertEqual(self.generic_operator.has_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr_type("bool"), "bool")
-        self.assertEqual(self.generic_operator.get_attrs_name(), {"bool"})
-        self.generic_operator.del_attr("bool")
-        self.assertEqual(self.generic_operator.has_attr("bool"), False)
-        self.assertEqual(len(self.generic_operator.get_attrs_name()), 0)
+        self.generic_operator.attr.add_attr("bool", True)
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), True)
+        self.assertEqual(self.generic_operator.attr.get_attr("bool"), True)
+        self.generic_operator.attr.del_attr("bool")
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), False)
 
     def test_param_int(self):
-        self.generic_operator.add_attr("int", 1)
-        self.assertEqual(self.generic_operator.get_attr("int"), 1)
+        self.generic_operator.attr.add_attr("int", 1)
+        self.assertEqual(self.generic_operator.attr.get_attr("int"), 1)
 
     def test_param_float(self):
-        self.generic_operator.add_attr("float", 2.0)
-        self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
+        self.generic_operator.attr.add_attr("float", 2.0)
+        self.assertEqual(self.generic_operator.attr.get_attr("float"), 2.0)
 
     def test_param_str(self):
-        self.generic_operator.add_attr("str", "value")
-        self.assertEqual(self.generic_operator.get_attr("str"), "value")
+        self.generic_operator.attr.add_attr("str", "value")
+        self.assertEqual(self.generic_operator.attr.get_attr("str"), "value")
 
     def test_param_l_int(self):
-        self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
-        self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.generic_operator.attr.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
 
     def test_param_l_bool(self):
-        self.generic_operator.add_attr("l_bool", [True, False, False, True])
-        self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
+        self.generic_operator.attr.add_attr("l_bool", [True, False, False, True])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_bool"), [True, False, False, True])
 
     def test_param_l_float(self):
-        self.generic_operator.add_attr("l_float", [2.0, 1.0])
-        self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
+        self.generic_operator.attr.add_attr("l_float", [2.0, 1.0])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_float"), [2.0, 1.0])
 
     def test_param_l_str(self):
-        self.generic_operator.add_attr("l_str", ["ok"])
-        self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
+        self.generic_operator.attr.add_attr("l_str", ["ok"])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_str"), ["ok"])
 
     def test_dynamicattribute_binding(self):
         # Check original C++ attributes are bound
@@ -76,20 +73,20 @@ class test_operator_binding(unittest.TestCase):
         self.assertEqual(attrs.get_attr("b"), "test")
         self.assertEqual(attrs.has_attr("c"), True)
         self.assertEqual(attrs.get_attr("c"), [True, False, True])
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c"})
         self.assertEqual(attrs.has_attr("d"), False)
 
         # Add Python attributes
         attrs.add_attr("d", 18.56)
         self.assertEqual(attrs.get_attr("d"), 18.56)
         self.assertEqual(attrs.has_attr("d"), True)
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c", "d"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d"})
         self.assertEqual(attrs.has_attr("e"), False)
 
         # Check that added Python attribute is accessible in C++
         # Return the value of an attribute named "d" of type float64 (double in C++)
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 18.56)
-        attrs.set_attr("d", 23.89)
+        attrs.d = 23.89
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
     def test_forward_dims(self):
@@ -129,18 +126,18 @@ class test_operator_binding(unittest.TestCase):
         myVar = 2
         myBool = True
         # Test dynamic attribute set
-        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
-        gop.myBool = myBool
+        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", my_var=myVar).get_operator()
+        gop.attr.my_bool = myBool
         # Test variable set by kwargs
-        self.assertEqual(gop.myVar, myVar)
+        self.assertEqual(gop.attr.my_var, myVar)
         # Test set attr
-        self.assertEqual(gop.myBool, myBool)
+        self.assertEqual(gop.attr.my_bool, myBool)
 
         # Test static attribute set !
         prod = aidge_core.Producer([1]).get_operator()
-        self.assertEqual(prod.Constant, False)
-        prod.Constant = True # By default Constant is False
-        self.assertEqual(prod.Constant, True)
+        self.assertEqual(prod.attr.constant, False)
+        prod.attr.constant = True # By default, constant is False
+        self.assertEqual(prod.attr.constant, True)
 
 
 
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index a8143c5e86ec82f6e595136cb2b4fa9175abffd3..7c3bc0f6f68506c02af1723b263455a9c72b1f3a 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -29,7 +29,7 @@ class test_attributes(unittest.TestCase):
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
         self.assertEqual(conv_op.in_channels(), in_channels)
         self.assertEqual(conv_op.out_channels(), out_channels)
-        self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
+        self.assertEqual(conv_op.attr.get_attr("kernel_dims"), k_dims)
 
     def test_fc(self):
         in_channels = 4
@@ -65,7 +65,7 @@ class test_attributes(unittest.TestCase):
     def test_leaky_relu(self):
         negative_slope = 0.25
         leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
-        self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
+        self.assertEqual(leakyrelu_op.attr.get_attr("negative_slope"), negative_slope)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
index d479c98b20534daa804f6019b63d528883c2b568..6348ba8dd1a635ce0299760b6fd31dcef58716cf 100644
--- a/aidge_core/unit_tests/test_tensor.py
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -42,7 +42,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -62,7 +62,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int64)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -73,7 +73,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
+        self.assertEqual(t.dtype(), aidge_core.dtype.float32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
         for i,j in zip(t.dims(), np_array.shape):
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 8fe2263c0aa2a2a3e70dc458ababc406b6823e0d..eaadc7a7ca5fa85672619fb2d3b5b17590fd3778 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -79,7 +79,7 @@ constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
  * Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
  * @param src Source DataFormat
  * @param dst Destination DataFormat
- * @return DataFormatTranspose Permutation array to achieve a transposition 
+ * @return DataFormatTranspose Permutation array to achieve a transposition
  *         from src to dst DataFormat.
 */
 constexpr inline DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index b09064c36f65a1e00d99ce5e2ff559e31681b065..682634015376bf309a015046decfa40a36e2b177 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -20,9 +20,18 @@
 #include <utility>
 #include <vector>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
 namespace Aidge {
 enum class DataType;
 
@@ -218,7 +227,7 @@ public:
      * GraphView object's Nodes, by calling Node::forwardDims().
      * This function verifies the following conditions:
      * - Every node will forwardDims() regardless of if dims were previously forwarded or not;
-     * - forwadDims() calls are made in node dependencies order, because if dims have changed 
+     * - forwardDims() calls are made in node dependency order, because if dims have changed
      *   at any point in the graph, it must be propagated correctly to all succeeding nodes;
      * - It handles cyclic dependencies correctly (currently only induced by the Memorize_Op).
      */
@@ -266,7 +275,7 @@ public:
      * @brief Get the Nodes pointed to by the GraphView object.
      * @return std::set<NodePtr>
      */
-    inline const std::set<NodePtr>& getNodes() const { return mNodes; }
+    inline const std::set<NodePtr>& getNodes() const noexcept { return mNodes; }
 
     /**
      * @brief Get the operator with the corresponding name if it is in the
@@ -460,8 +469,8 @@ public:
      * @return true replacement has been performed
      * @return false no replacement has been performed
      */
-    static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
     static bool replace(const std::set<NodePtr>& oldNodes, const std::set<NodePtr>& newNodes);
+    static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
 
     /**
      * @brief Clone the GraphView with shared Operators. It is a new GraphView, with cloned Nodes, but the new Nodes refer to the same Operators as the original ones.
@@ -509,6 +518,11 @@ public:
      */
     void updateInputsOutputs();
 
+#ifdef PYBIND
+    std::string repr() const {
+        return fmt::format("GraphView(name='{}', Nodes: {} (inputs: {}, outputs: {}))", name(), mNodes.size(), mInputNodes.size(), mOutputNodes.size());
+    }
+#endif
 private:
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index bad323c8629b67282bfb217d188b15ba43711662..f694a1234b6037a0ae75a89380af9747765e290c 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -19,10 +19,19 @@
 #include <vector>
 #include <utility>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
 #include "aidge/graph/Connector.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
 namespace Aidge {
 
 using NodePtr = std::shared_ptr<Node>;
@@ -423,6 +432,27 @@ public:
 
   std::set<NodePtr> getNodeDelta(int delta,std::set<Aidge::NodePtr> nodeSee);
 
+#ifdef PYBIND
+    std::string repr() const {
+        std::string nodeString{fmt::format("Node(name='{}', optype='{}'", name(), type())};
+        if (mParents.size() > 0) {
+            std::vector<std::int8_t> connectedParents(mParents.size(), 0);
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if (mParents[i])
+                    connectedParents[i] = std::int8_t(1);
+            }
+            nodeString = fmt::format("{}, parents: {}", nodeString, connectedParents);
+        }
+        if (mChildren.size() > 0) {
+            std::vector<std::vector<std::int8_t>> connectedChildren{};
+            for (std::size_t i = 0; i < nbOutputs(); ++i) {
+                connectedChildren.push_back(std::vector<std::int8_t>(mChildren[i].size(), std::int8_t(1)));
+            }
+            nodeString = fmt::format("{}, children: {}", nodeString, connectedChildren);
+        }
+        return fmt::format("{})", nodeString);
+    }
+#endif
 
 private:
   ///////////////////////////////////////////////////////
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index f1a7723ea64d713e497b039ca2eb5bb2f4620e62..06ee4327e2f2d4df32c2decd73841bdf5f79a739 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -28,27 +28,31 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
-                public StaticAttributes<AvgPoolingAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>> {
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)> {
 
 public:
     static const std::string Type;
 
-    AvgPooling_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
     template <AvgPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    AvgPooling_Op() = delete;
+
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
+          mAttributes(std::make_shared<Attributes_>(
+                        attr<AvgPoolingAttr::StrideDims>(stride_dims),
+                        attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -76,6 +80,10 @@ public:
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
@@ -101,8 +109,6 @@ inline std::shared_ptr<Node> AvgPooling(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
     return AvgPooling(to_array(kernel_dims), name, stride_dims);
 }
-
-
 }  // namespace Aidge
 
 extern template class Aidge::AvgPooling_Op<1>;
@@ -112,8 +118,10 @@ extern template class Aidge::AvgPooling_Op<4>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
-                                                          "KernelDims"};
+const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
+    "StrideDims",
+    "KernelDims"
+};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index e2ae5276d5ef16f2a06036bcfef3398cba664894..b5b64eb428d709e804dd9f6711530b348e0be747 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -28,21 +28,31 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
-                public StaticAttributes<BatchNormAttr, float, float> {
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    BatchNorm_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
     template <BatchNormAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    BatchNorm_Op() = delete;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param, InputCategory::Param, InputCategory::Param}, 1),
-          Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
-                           attr<BatchNormAttr::Momentum>(momentum)) {}
+        : OperatorTensor(Type,
+                            {InputCategory::Data,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param},
+                            1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<BatchNormAttr::Epsilon>(epsilon),
+            attr<BatchNormAttr::Momentum>(momentum))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -72,6 +82,10 @@ public:
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
+    inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
     }
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 98a6daf172813614b3052e210a42fbf62df0ca29..6911053932afff6675be4eb2c713d8d3cd34b462 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -19,8 +19,8 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -30,21 +30,31 @@ public:
     void forward() override;
 };
 
+enum class CastAttr { TargetType };
+
 class Cast_Op : public OperatorTensor,
     public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
 public:
     static const std::string Type;
 
-    Cast_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {
-        mImpl = std::make_shared<Cast_OpImpl>(*this);
-    }
+private:
+    using Attributes_ = StaticAttributes<CastAttr, DataType>;
+    template <CastAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Cast_Op() = delete;
+
+    Cast_Op(const DataType targetType);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Cast_Op(const Cast_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Cast_Op, *this, op.backend());
@@ -64,6 +74,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -72,9 +85,15 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Cast(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Cast_Op>(), name);
+
+inline std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
 }
+} // namespace Aidge
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::CastAttr>::data[] = { "TargetType" };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index a9b3812f4daee3d6ca4c97021af757d255e2aa06..8341a93fe66d260ae3687170629b8759d0305a9c 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -28,25 +28,32 @@
 namespace Aidge {
 class Concat_OpImpl : public OperatorImpl {
 public:
-    Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Concat_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
     void forward() override;
 };
 
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
-    public StaticAttributes<ConcatAttr, DimSize_t> {
+    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<ConcatAttr, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>;
     template <ConcatAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
+public:
+    Concat_Op() = delete;
+
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
         : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
-          Attributes_(attr<ConcatAttr::Axis>(axis))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConcatAttr::Axis>(axis)))
     {
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Concat operator should have at least one input.");
@@ -60,7 +67,7 @@ public:
      */
     Concat_Op(const Concat_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Concat_Op, *this, op.backend());
@@ -82,6 +89,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
     }
@@ -90,7 +100,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const DimIdx_t axis = 0, const std::string& name = "") {
+inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
 }
 }
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index d529c26c420d6e50030a19ac250241a1009e6ab4..87ff5854b310ca472994bd6b68fd6ae58d31e806 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -34,33 +34,32 @@ enum class ConvAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public StaticAttributes<ConvAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>> {
+                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)> {
 
 public:
     static const std::string Type;
 
-    Conv_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Conv_Op() = delete;
 
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
-          Attributes_(attr<ConvAttr::StrideDims>(strideDims),
-                      attr<ConvAttr::DilationDims>(dilationDims),
-                    //   attr<ConvAttr::InChannels>(inChannels),
-                    //   attr<ConvAttr::OutChannels>(outChannels),
-                      attr<ConvAttr::KernelDims>(kernelDims)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvAttr::StrideDims>(strideDims),
+            attr<ConvAttr::DilationDims>(dilationDims),
+            attr<ConvAttr::KernelDims>(kernelDims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -113,6 +112,12 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
+
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 68549f4ef08018b4304936520e45ee3940aa9c41..c8a83ff7de62a61e8125eac29d61c3938115cd09 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -33,30 +33,32 @@ enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
-                public StaticAttributes<ConvDepthWiseAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>> {
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    ConvDepthWise_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    ConvDepthWise_Op() = delete;
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
-          Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
-                      attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
+            attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -89,6 +91,11 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 30f76aa448e6caecbd94eda5129ffe66ae8fb8c9..01da37a05414c5994ace767770e7c26fc8cd4646 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -42,9 +42,9 @@ public:
     FC_Op(const FC_Op& op)
         : OperatorTensor(op)
     {
-        if (op.mImpl){
+        if (op.mImpl) {
             SET_IMPL_MACRO(FC_Op, *this, op.backend());
-        }else{
+        } else {
             mImpl = nullptr;
         }
     }
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index a6812a5ce05cfd3a7c9d4badb18a504005d78898..3e9b780732fa9144f2e58bef854d1b42d063d0bf 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CORE_OPERATOR_GATHER_H_
 #define AIDGE_CORE_OPERATOR_GATHER_H_
 
-#include <cstdint>  // std::int64_t
+#include <cstdint>  // std::int8_t, std::int64_t
 #include <memory>
 #include <string>
 #include <vector>
@@ -36,21 +36,31 @@ enum class GatherAttr { Axis, Indices, GatheredShape };
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
-                public StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>> {
-
+                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)> {
 public:
     static const std::string Type;
 
+    using Attributes_ = StaticAttributes<GatherAttr,
+                                            std::int8_t,
+                                            std::vector<int64_t>,
+                                            std::vector<DimSize_t>>;
+private:
+    template <GatherAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
     Gather_Op() = delete;
 
-    using Attributes_ = StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>>;
-    template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
-    Gather_Op(std::int8_t axis, const std::vector<int64_t>& indices, const std::vector<DimSize_t>& gatheredShape)
-            : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
-            Attributes_(attr<GatherAttr::Axis>(axis),
-                        attr<GatherAttr::Indices>(indices),
-                        attr<GatherAttr::GatheredShape>(gatheredShape))
+    Gather_Op(std::int8_t axis,
+              const std::vector<int64_t>& indices,
+              const std::vector<DimSize_t>& gatheredShape)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+            attr<GatherAttr::Axis>(axis),
+            attr<GatherAttr::Indices>(indices),
+            attr<GatherAttr::GatheredShape>(gatheredShape)))
     {
         mImpl = std::make_shared<Gather_OpImpl>(*this);
     }
@@ -61,7 +71,7 @@ public:
      */
     Gather_Op(const Gather_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Gather_Op, *this, op.backend());
@@ -84,6 +94,11 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int8_t& axis() const { return mAttributes->getAttr<GatherAttr::Axis>(); }
+    inline std::vector<int64_t>& indices() const { return mAttributes->getAttr<GatherAttr::Indices>(); }
+    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes->getAttr<GatherAttr::GatheredShape>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "indices"};
     }
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index cdd4779c1d767f6d46f5b3de8a6bb7a2d0607bc9..8196c4268e669001d99f25ed2cead546e1141aa7 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,13 +26,14 @@
 namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
-      public DynamicAttributes {
+      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
     ComputeDimsFunc mForwardDims;
 
+    const std::shared_ptr<DynamicAttributes> mAttributes;
+
 public:
     GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut)
         : OperatorTensor(type, inputsCategory, nbOut)
@@ -42,10 +43,11 @@ public:
 
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
         : OperatorTensor(type, [nbData, nbParam]() {
-            std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
-            inputsCategory.resize(nbData + nbParam, InputCategory::Param);
-            return inputsCategory;
-        }(), nbOut)
+                                std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
+                                inputsCategory.resize(nbData + nbParam, InputCategory::Param);
+                                return inputsCategory;
+                            }(), nbOut),
+          mAttributes(std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
@@ -55,7 +57,8 @@ public:
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
     }
@@ -74,6 +77,22 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    template <class T>
+    inline T& getAttr(const std::string& name)
+    { return mAttributes->template getAttr<T>(name); }
+    template <class T>
+    inline const T& getAttr(const std::string& name) const
+    { return mAttributes->template getAttr<T>(name); }
+
+    ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template <class T>
+    inline void addAttr(const std::string& name, const T& value) const
+    { mAttributes->template addAttr<T>(name, value); }
 
     // Helper functions that can be used with setForwardDims():
     static const ComputeDimsFunc Identity;
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 22fe619834290c5a6dbf26614c6b4d1a1bb30b55..294e7ebb009ff184c9150d2aa18067a15deeba22 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -12,16 +12,16 @@
 #ifndef AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 #define AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 
-#include <vector>
 #include <memory>
+#include <vector>
 
-#include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -30,20 +30,24 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
-    public StaticAttributes<LeakyReLUAttr, float> {
+    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)> {
 public:
     static const std::string Type;
 
-    LeakyReLU_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
     template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    LeakyReLU_Op() = delete;
 
     LeakyReLU_Op(float negativeSlope)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(
-            attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
+          mAttributes(
+            std::make_shared<Attributes_>(
+                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
     {}
 
     /**
@@ -52,7 +56,7 @@ public:
      */
     LeakyReLU_Op(const LeakyReLU_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
@@ -76,6 +80,9 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& negativeSlope() const noexcept { return mAttributes->getAttr<LeakyReLUAttr::NegativeSlope>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 97e3d19b94ecf21234047a7d291a315c946e3f0f..082aa26bbdf1d55dcae29d1ffb2b9810db8b17d0 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -34,30 +34,31 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
-                public StaticAttributes<MaxPoolingAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    MaxPooling_Op() = delete;
-
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              bool>;
+
+private:
     template <MaxPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    MaxPooling_Op() = delete;
 
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             bool ceil_mode = false)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
-                      attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-                      attr<MaxPoolingAttr::CeilMode>(ceil_mode))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<MaxPoolingAttr::StrideDims>(stride_dims),
+            attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+            attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
         {}
 
     /**
@@ -66,7 +67,7 @@ public:
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
@@ -90,17 +91,17 @@ public:
             const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             std::function<float(float)> roundingFunction;
-            if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
+            if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
                 roundingFunction = [](float x) { return std::ceil(x); };
             } else {
                 roundingFunction = [](float x) { return std::floor(x); };
             }
 
-            for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             roundingFunction(static_cast<float>(inputDims[dim+2] -
-                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
-                                            static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
+                                                                    mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
+                                            static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
@@ -116,6 +117,11 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<MaxPoolingAttr::KernelDims>(); }
+    inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index fbda267c6d1fe40c9e8421b5db44466e463ee0a4..d6af56f2faad18b9e39c793ea68e39eac4dd2f01 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -37,20 +37,25 @@ public:
 enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
-    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)>,
-    public StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int> {
+    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int>;
+private:
+    using Attributes_ = StaticAttributes<MemorizeAttr, std::uint32_t, std::uint32_t, std::uint32_t>;
     template <MemorizeAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Memorize_Op(const unsigned int endStep)
+public:
+    Memorize_Op() = delete;
+
+    Memorize_Op(const std::uint32_t endStep)
         : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
-          Attributes_(attr<MemorizeAttr::ScheduleStep>(0),
-                      attr<MemorizeAttr::ForwardStep>(0),
-                      attr<MemorizeAttr::EndStep>(endStep))
+          mAttributes(std::make_shared<Attributes_>(
+                        attr<MemorizeAttr::ScheduleStep>(0),
+                        attr<MemorizeAttr::ForwardStep>(0),
+                        attr<MemorizeAttr::EndStep>(endStep)))
     {
         mOutputs[1] = mOutputs[0];
     }
@@ -62,7 +67,7 @@ public:
      */
     Memorize_Op(const Memorize_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
@@ -87,6 +92,11 @@ public:
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); }
+    inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "data_input_init"};
     }
@@ -95,7 +105,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Memorize(const unsigned int endStep, const std::string& name = "") {
+inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
 }
 }  // namespace Aidge
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 8fb6db20ac2e0f3e244bc8f32cc03cb27ec8db6e..adec17d07f39727a0c75d32fa24bcc624aa66e1a 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -18,11 +18,21 @@
 #include <utility>
 #include <cstddef>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/hook/Hook.hpp"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
 namespace Aidge {
 
 enum class OperatorType {
@@ -77,12 +87,14 @@ public:
 public:
     virtual std::shared_ptr<Operator> clone() const = 0;
 
+    virtual std::shared_ptr<Attributes> attributes() const { return nullptr; }
     /**
      * @brief Set the specified input with a shallow copy.
      * @param inputIdx Index of the input to set.
      * @param data Data to copy.
      */
     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void resetInput(const IOIndex_t inputIdx) = 0;
 
     /**
      * @brief Set the specified input value by performing a deep copy of the given data.
@@ -199,6 +211,17 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {};
     }
+
+#ifdef PYBIND
+    std::string repr() const {
+        return fmt::format("Operator(type = '{}', nb_in = {}, nb_out = {}, attr = {}, backend = {})",
+                    type(),
+                    nbInputs(),
+                    nbOutputs(),
+                    (attributes() ? attributes()->repr() : "None"),
+                    (mImpl ? "'"+backend()+"'" : "None"));
+    }
+#endif
 };
 } // namespace Aidge
 
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index d7627ab2a83f988ccd0964aa622b23468e83b8f1..657a6d8ab6124b8919a3ac8fea5b6bfa6c4254b9 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -51,6 +51,7 @@ public:
     ///////////////////////////////////////////////////
     virtual void associateInput(const IOIndex_t inputIdx,
                                 const std::shared_ptr<Data>& data) override;
+    void resetInput(const IOIndex_t inputIdx) override final;
     ///////////////////////////////////////////////////
 
     ///////////////////////////////////////////////////
@@ -84,7 +85,7 @@ public:
 
     virtual void setDataType(const DataType& dataType) const override;
     virtual void setDataFormat(const DataFormat& dataFormat) const override;
-    
+
     virtual void forward() override;
 
 protected:
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 2eef92d26e9a738845e08acabbb241f26cc1cc6b..5fd0f93986206e6cd958a85055159783eeb8bc8f 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -13,16 +13,16 @@
 #define AIDGE_CORE_OPERATOR_PAD_H_
 
 #include <array>
-#include <numeric>
+#include <memory>
+#include <string>
 #include <vector>
-#include <cmath>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -31,30 +31,31 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
-                public StaticAttributes<PadAttr,
-                                       std::array<DimSize_t, 2*DIM>,
-                                       PadBorderType,
-                                       double> {
+                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    Pad_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<PadAttr,
-                                             std::array<DimSize_t, 2*DIM>,
-                                             PadBorderType,
-                                             double>;
+                                            std::array<DimSize_t, 2*DIM>,
+                                            PadBorderType,
+                                            double>;
     template <PadAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    Pad_Op() = delete;
 
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                      const PadBorderType &borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
-                           attr<PadAttr::BorderType>(borderType),
-                           attr<PadAttr::BorderValue>(borderValue)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<PadAttr::BeginEndBorders>(beginEndTuples),
+            attr<PadAttr::BorderType>(borderType),
+            attr<PadAttr::BorderValue>(borderValue))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -62,7 +63,7 @@ public:
      */
     Pad_Op(const Pad_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {}
 
     /**
@@ -80,9 +81,9 @@ public:
             const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
             for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+                outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
                                     + inputDims[dim+2]
-                                    + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+                                    + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
             }
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
@@ -98,6 +99,11 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
+    inline PadBorderType& borderType() const noexcept { return mAttributes->template getAttr<PadAttr::BorderType>(); }
+    inline double& borderValue() const noexcept { return mAttributes->template getAttr<PadAttr::BorderValue>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
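Each operator now exposes named accessors in place of `getAttr<Enum>()` at call sites; the accessors return mutable references into the shared `Attributes_` object, so they serve both reads and writes. A short usage sketch for the 2-D case (values are illustrative):

    #include "aidge/operator/Pad.hpp"

    void demo() {
        Aidge::Pad_Op<2> pad({1, 1, 2, 2});            // beginEndTuples: 2*DIM values
        pad.borderType() = Aidge::PadBorderType::Edge; // accessors return mutable references
        pad.borderValue() = 0.5;
        const auto& borders = pad.beginEndBorders();   // read side: std::array<DimSize_t, 4>
        (void)borders;
    }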
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index cdc2d21b5a5daabd1fcead1d5f5bff4432207e00..575d56b455940ea98571110dbaa9a83de09fef37 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -34,18 +34,19 @@ public:
 enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
-    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)>,
-    public StaticAttributes<PopAttr, unsigned int> {
+    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<PopAttr, unsigned int>;
-    template <PopAttr e>
-    using attr = typename Attributes_::template attr<e>;
+private:
+    using Attributes_ = StaticAttributes<PopAttr, std::uint32_t>;
+    template <PopAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
+public:
     Pop_Op()
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(attr<PopAttr::ForwardStep>(0))
+          mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
     {
         mImpl = std::make_shared<Pop_OpImpl>(*this);
     }
@@ -56,7 +57,7 @@ public:
      */
     Pop_Op(const Pop_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Pop_Op, *this, op.backend());
@@ -80,6 +81,9 @@ public:
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
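Note the changed copy semantics: the copy-constructor now copies the `shared_ptr` rather than the attribute values, so a copy (and therefore `clone()`) aliases the original's attribute storage. A sketch of the observable effect, assuming this aliasing is intentional:

    #include <cassert>
    #include "aidge/operator/Pop.hpp"

    void demo() {
        Aidge::Pop_Op original;
        Aidge::Pop_Op copy(original);         // copy-ctor: mAttributes(op.mAttributes)
        copy.forwardStep() = 5;               // writes through the shared Attributes_
        assert(original.forwardStep() == 5);  // the original observes the change
    }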
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 4206331a2bea9f4ba51ffef6fc17447a23735951..9e3bdd1ba2f601da27dea3a6a01131a0c8191eb4 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -31,20 +31,24 @@ enum class ProdAttr { Constant };
 class Producer_Op
     : public OperatorTensor,
       public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>(
-                                          const Producer_Op &)>,
-      public StaticAttributes<ProdAttr, bool> {
+                                          const Producer_Op &)> {
 public:
     static const std::string Type;
 
+private:
     using Attributes_ = StaticAttributes<ProdAttr, bool>;
-    template <ProdAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <ProdAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Producer_Op() = delete;
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
                 bool constant = false)
         : OperatorTensor(Type, {}, 1),
-          Attributes_(attr<ProdAttr::Constant>(constant))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ProdAttr::Constant>(constant)))
     {
         mOutputs[0]->resize(dims);
         mImpl = std::make_shared<OperatorImpl>(*this);
@@ -95,6 +99,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {};
     }
@@ -109,7 +116,7 @@ public:
     }
 
     void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
-        if (getAttr<ProdAttr::Constant>()) {
+        if (mAttributes->template getAttr<ProdAttr::Constant>()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
         }
         OperatorTensor::setOutput(outputIdx, data);
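The `constant()` accessor pairs with the guard in `setOutput()`: a constant producer refuses output updates. A minimal sketch of the expected behavior, assuming `AIDGE_THROW_OR_ABORT` raises `std::runtime_error` when exceptions are enabled (the Tensor construction is illustrative):

    #include <array>
    #include <cassert>
    #include <memory>
    #include <stdexcept>
    #include "aidge/data/Tensor.hpp"
    #include "aidge/operator/Producer.hpp"

    void demo() {
        Aidge::Producer_Op prod(std::array<Aidge::DimSize_t, 1>{8}, /*constant=*/true);
        assert(prod.constant());
        try {
            prod.setOutput(0, std::make_shared<Aidge::Tensor>());
        } catch (const std::runtime_error&) {
            // "Producer is constant, cannot update output."
        }
    }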
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index b975a96ab3adea5998cf4e21156c101dad3c8867..3fcf19ffd13645fb28b6efcfefaf8e347b148c89 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -29,22 +29,28 @@ namespace Aidge {
 enum class ReduceMeanAttr { Axes, KeepDims };
 
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>,
-                public StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t> {
+                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)> {
 
-   public:
+public:
     static const std::string Type;
 
-    ReduceMean_Op() = delete;
-
-    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ReduceMeanAttr,
+                                            std::vector<std::int32_t>,
+                                            DimSize_t>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ReduceMean_Op() = delete;
 
     ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(attr<ReduceMeanAttr::Axes>(axes),
-                      attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReduceMeanAttr::Axes>(axes),
+            attr<ReduceMeanAttr::KeepDims>(keep_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -52,7 +58,7 @@ class ReduceMean_Op : public OperatorTensor,
      */
     ReduceMean_Op(const ReduceMean_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
@@ -73,6 +79,10 @@ class ReduceMean_Op : public OperatorTensor,
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes->getAttr<ReduceMeanAttr::Axes>(); }
+    inline DimSize_t& keepDims() const noexcept { return mAttributes->getAttr<ReduceMeanAttr::KeepDims>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 769a07ff3d3ad8057df009ba7de44dc6a52d445b..4ea0cca30089555ff7979f141f94e5c84f04ffa1 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -32,22 +32,26 @@ public:
 enum class ReshapeAttr { Shape, AllowZero };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>,
-                   public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>, bool> {
+                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)> {
 
 public:
     static const std::string Type;
 
-    Reshape_Op() = delete;
+private:
+    using Attributes_ = StaticAttributes<ReshapeAttr,
+                                            std::vector<std::int64_t>,
+                                            bool>;
+    template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>,  bool>;
-    template <ReshapeAttr e>
-    using attr = typename Attributes_::template attr<e>;
+public:
+    Reshape_Op() = delete;
 
     Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
         : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
-          Attributes_(attr<ReshapeAttr::Shape>(shape),
-                      attr<ReshapeAttr::AllowZero>(allowzero))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReshapeAttr::Shape>(shape),
+            attr<ReshapeAttr::AllowZero>(allowzero)))
     {
         mImpl = std::make_shared<Reshape_OpImpl>(*this);
     }
@@ -58,7 +62,7 @@ public:
      */
     Reshape_Op(const Reshape_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
@@ -81,6 +85,10 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); }
+    inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index e6982dc138e84754b1845fcc2d90cf68cd5c8ec7..565affc57ae8e7b1838466733b0f5d8fa8e1a6d6 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -31,12 +31,17 @@ public:
     static const std::string Type;
 
     Resize_Op()
-        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData}, 1){}
-        
+        : OperatorTensor(Type,
+            {InputCategory::Data,
+                InputCategory::OptionalData,
+                InputCategory::OptionalData,
+                InputCategory::OptionalData},
+            1) {}
+
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
      * but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy. 
+     * @param op Operator to copy.
      */
 
     Resize_Op(const Resize_Op& op)
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 2cee276f82fdc999176529bb9d14002580098113..7d8e11b31546cd87a8d6b2d36e2929c9ef6df7a2 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -12,39 +12,42 @@
 #ifndef AIDGE_CORE_OPERATOR_SCALING_H_
 #define AIDGE_CORE_OPERATOR_SCALING_H_
 
+#include <cstddef>  // std::size_t
 #include <vector>
 #include <memory>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 enum class ScalingAttr {
-    scalingFactor, quantizedNbBits, isOutputUnsigned
+    ScalingFactor, QuantizedNbBits, IsOutputUnsigned
 };
 
-class Scaling_Op 
+class Scaling_Op
     : public OperatorTensor,
-      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)>,
-      public StaticAttributes<ScalingAttr, float, size_t, bool> {
+      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)> {
 public:
     static const std::string Type;
 
-    Scaling_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ScalingAttr, float, std::size_t, bool>;
     template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Scaling_Op() = delete;
 
     Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(
-            attr<ScalingAttr::scalingFactor>(scalingFactor),
-            attr<ScalingAttr::quantizedNbBits>(nbBits),
-            attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ScalingAttr::ScalingFactor>(scalingFactor),
+            attr<ScalingAttr::QuantizedNbBits>(nbBits),
+            attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
     {}
 
     /**
@@ -53,7 +56,7 @@ public:
      */
     Scaling_Op(const Scaling_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
@@ -72,6 +75,11 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& scalingFactor() const noexcept { return mAttributes->getAttr<ScalingAttr::ScalingFactor>(); }
+    inline std::size_t& quantizedNbBits() const noexcept { return mAttributes->getAttr<ScalingAttr::QuantizedNbBits>(); }
+    inline bool& isOutputUnsigned() const noexcept { return mAttributes->getAttr<ScalingAttr::IsOutputUnsigned>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
@@ -85,10 +93,10 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
 }
 */
-inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, 
-                                     std::size_t quantizedNbBits=8, 
-                                     bool isOutputUnsigned=true, 
-                                     const std::string& name = "") 
+inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
+                                     std::size_t quantizedNbBits=8,
+                                     bool isOutputUnsigned=true,
+                                     const std::string& name = "")
 {
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
 }
@@ -97,7 +105,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
 namespace {
 template <>
 const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-    = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
+    = {"ScalingFactor", "QuantizedNbBits", "IsOutputUnsigned"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
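The rename to PascalCase is not only cosmetic: every name-based lookup in `StaticAttributes` (`hasAttr`, `getAttrType`, `dict()`, the Python getters) walks `EnumStrings<...>::data`, so the string table must mirror the enum spelling and declaration order exactly. Schematically, for any attribute enum, following the same convention as this file:

    // Illustrative enum; the invariant is one string per enumerator,
    // PascalCase, in declaration order, since lookups are index-based.
    enum class MyAttr { ScalingFactor, QuantizedNbBits };

    namespace {
    template <>
    const char* const EnumStrings<MyAttr>::data[]
        = {"ScalingFactor", "QuantizedNbBits"};
    }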
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index a7790201884bbd7375039ad8fc6f7ddd98e6e9b5..6d2d1b5e7c212fafa5ad6457d9e0a260e96b1c90 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -36,20 +36,24 @@ enum class ShapeAttr { Start, End };
 class Shape_Op : public OperatorTensor,
                 public Registrable<Shape_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)>,
-                public StaticAttributes<ShapeAttr, std::int64_t, std::int64_t> {
+                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)> {
 
 public:
     static const std::string Type;
 
-    Shape_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>;
     template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
-    Shape_Op(std::int64_t start, std::int64_t end)
-            : OperatorTensor(Type, {InputCategory::Data}, 1),
-            Attributes_(attr<ShapeAttr::Start>(start),
-                        attr<ShapeAttr::End>(end))
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Shape_Op() = delete;
+
+    Shape_Op(const std::int64_t start, const std::int64_t end)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ShapeAttr::Start>(start),
+            attr<ShapeAttr::End>(end)))
     {
         mImpl = std::make_shared<Shape_OpImpl>(*this);
     }
@@ -60,7 +64,7 @@ public:
      */
     Shape_Op(const Shape_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Shape_Op, *this, op.backend());
@@ -82,6 +86,10 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int64_t& start() const noexcept { return mAttributes->getAttr<ShapeAttr::Start>(); }
+    inline std::int64_t& end() const noexcept { return mAttributes->getAttr<ShapeAttr::End>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -90,7 +98,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Shape(std::int64_t start = 0, std::int64_t end = -1, const std::string& name = "") {
+inline std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 30ac28b73bda9fda9b2a651f93e84fa9aef27f0d..7d425a0f3589e74b54ee0834fdc4291ea7f49bad 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -29,22 +29,29 @@ enum class SliceAttr { Starts, Ends, Axes, Steps };
 
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>,
-      public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>, std::vector<std::int64_t>> {
-
+      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)> {
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SliceAttr,
+                                            std::vector<std::int64_t>,
+                                            std::vector<std::int64_t>,
+                                            std::vector<std::int8_t>,
+                                            std::vector<std::int64_t>>;
+    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Slice_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>, std::vector<std::int64_t>>;
-    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
     Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes, const std::vector<std::int64_t>& steps)
         : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
-          Attributes_(attr<SliceAttr::Starts>(starts),
-                      attr<SliceAttr::Ends>(ends),
-                      attr<SliceAttr::Axes>(axes),
-                      attr<SliceAttr::Steps>(steps))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<SliceAttr::Starts>(starts),
+            attr<SliceAttr::Ends>(ends),
+            attr<SliceAttr::Axes>(axes),
+            attr<SliceAttr::Steps>(steps)))
     {}
 
 
@@ -55,7 +62,7 @@ public:
      */
     Slice_Op(const Slice_Op &op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Slice_Op, *this, op.backend());
@@ -73,10 +80,16 @@ public:
     std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
 
     bool dimsForwarded() const override final;
-    bool forwardDims(bool allowDataDependency = false) override final;
+    bool forwardDims(bool allowDataDependency = true) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes->getAttr<SliceAttr::Starts>(); }
+    inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes->getAttr<SliceAttr::Ends>(); }
+    inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes->getAttr<SliceAttr::Axes>(); }
+    inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes->getAttr<SliceAttr::Steps>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "starts", "ends", "axes", "steps"};
     }
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 394250f2692cfc42594ffed610451606ab2a25df..70f3a561ae5c9ba4720de8419bcd5aaf32a51e47 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -24,24 +24,29 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class SoftmaxAttr { AxisIdx };
+enum class SoftmaxAttr { Axis };
 
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
-                public StaticAttributes<SoftmaxAttr, std::size_t> {
+                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)> {
 
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SoftmaxAttr, std::int32_t>;
+    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Softmax_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SoftmaxAttr, std::size_t>;
-    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
-    Softmax_Op(std::size_t axis)
-            :  OperatorTensor(Type, {InputCategory::Data}, 1),
-            Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {}
+    Softmax_Op(std::int32_t axis)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+                attr<SoftmaxAttr::Axis>(axis)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -49,7 +54,7 @@ public:
      */
     Softmax_Op(const Softmax_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
@@ -68,6 +73,10 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    inline std::int32_t& axis() const noexcept { return mAttributes->getAttr<SoftmaxAttr::Axis>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -76,7 +85,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Softmax(std::size_t axis, const std::string& name = "") {
+inline std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
 }
 } // namespace Aidge
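The axis attribute switches from `std::size_t` to `std::int32_t`; a signed axis is the usual prerequisite for accepting negative, NumPy/ONNX-style indices. The patch does not show the normalization itself, so the following is only a plausible sketch of what a consumer of the signed axis would do:

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>

    // Hypothetical helper: map a possibly-negative axis onto [0, rank).
    std::size_t normalizeAxis(std::int32_t axis, std::size_t rank) {
        const std::int64_t a = (axis < 0) ? axis + static_cast<std::int64_t>(rank)
                                          : axis;
        if (a < 0 || a >= static_cast<std::int64_t>(rank)) {
            throw std::out_of_range("axis out of range");
        }
        return static_cast<std::size_t>(a);
    }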
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 1705cbb01157046a632c7875b7a041f6908ec495..72096448ebf0e00d73e33bdab094ca7f0b7d0633 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -27,28 +27,33 @@
 namespace Aidge {
 class TransposeImpl : public OperatorImpl {
 public:
-    TransposeImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    TransposeImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
     void forward() override;
 };
 
 enum class TransposeAttr { OutputDimsOrder };
 
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)>,
-                public StaticAttributes<TransposeAttr, std::vector<DimSize_t>> {
+                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)> {
 
-   public:
+public:
     static const std::string Type;
 
-    Transpose_Op() = delete;
 
+private:
     using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
-    template <TransposeAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <TransposeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Transpose_Op() = delete;
 
     Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
     {
         mImpl = std::make_shared<TransposeImpl>(*this);
     }
@@ -59,7 +64,7 @@ class Transpose_Op : public OperatorTensor,
      */
     Transpose_Op(const Transpose_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
@@ -81,6 +86,9 @@ class Transpose_Op : public OperatorTensor,
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes->getAttr<TransposeAttr::OutputDimsOrder>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index 927686cfd5cca910c5ffb25364ae4bc971ad18bf..c1f6a8a7f704b4bd813983cb178d9e5acba5a5e1 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -12,15 +12,14 @@
 #ifndef AIDGE_CORE_UTILS_ATTRIBUTES_H_
 #define AIDGE_CORE_UTILS_ATTRIBUTES_H_
 
-#ifdef PYBIND
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#endif
-#include <vector>
 #include <string>
 #include <set>
 
 #ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <fmt/format.h>
+
 namespace py = pybind11;
 #endif
 
@@ -36,26 +35,53 @@ namespace Aidge {
 template<class T, std::size_t N>
 constexpr std::size_t size(T (&)[N]) { return N; }
 
+
 /* This abstract class makes it possible to avoid binding Attributes.
 *  Otherwise we would need to bind every possible template instantiation of Attributes.
 *  Every operator can access the methods of this class by inheriting from
 *  Attributes in the binding code.
 */
 class Attributes {
+protected:
+    /**
+     * @brief Convert snake_case to PascalCase.
+     * @param snakeCase string to convert.
+    */
+    static std::string snakeToPascal(const std::string& snakeCase);
+
+
+    /**
+     * @brief Convert PascalCase to snake_case.
+     * @param pascalCase string to convert.
+    */
+    static std::string pascalToSnake(const std::string& pascalCase);
+
+    /**
+     * @brief Check whether a given string is in PascalCase.
+     * @param str String to check.
+     */
+    static bool isPascalCase(const std::string& str);
+
+    /**
+     * @brief Check whether a given string is in snake_case.
+     * @param str String to check.
+     */
+    static bool isSnakeCase(const std::string& str);
+
 public:
     /**
      * @brief Check if the attribute exists.
      * @param name Name of the attribute to check.
      * @return bool True if the attribute exists, false otherwise.
     */
-    virtual bool hasAttr(const std::string& name) const = 0;
+    virtual bool hasAttr(const std::string& /*name*/) const = 0;
 
     /**
      * @brief Get the (implementation defined) name of the type of an attribute, returned by std::type_info::name.
      * @param name Name of the attribute.
      * @return std::string Name of the type as returned by std::type_info::name.
     */
-    virtual std::string getAttrType(const std::string& name) const = 0;
+    virtual std::string getAttrType(const std::string& /*name*/) const = 0;
 
     /**
      * @brief Get the attribute's name list.
@@ -64,16 +90,25 @@ public:
     virtual std::set<std::string> getAttrsName() const = 0;
 
 #ifdef PYBIND
+    virtual bool hasAttrPy(const std::string& name) const = 0;
+
     /* Bindable get function; does not require any templating.
     *  This is thanks to py::object, which allows the function to
     *  be agnostic to its return type.
     */
     virtual py::object getAttrPy(const std::string& name) const = 0;
     /* Bindable set function; does not require any templating.
     *  This is thanks to py::object, which allows the function to
     *  be agnostic to the ``value`` type.
     */
-    virtual void setAttrPy(const std::string& name, py::object&& value) = 0;
+    virtual void setAttrPy(const std::string& /*name*/, py::object&& /*value*/) = 0;
+
+    virtual std::string str() const = 0;
+
+    virtual std::string repr() const = 0;
+
+    virtual py::dict dict() const = 0;
+
 #endif
     virtual ~Attributes() {}
 };
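The four case helpers are only declared in this header; their definitions live in the implementation file, outside this diff. A minimal sketch of implementations consistent with how the callers in the following hunks use them (assumed behavior, not the actual Aidge code):

    #include <cctype>
    #include <string>

    // Assumed behavior: "BeginEndBorders" -> "begin_end_borders".
    std::string pascalToSnake(const std::string& pascalCase) {
        std::string out;
        for (const char c : pascalCase) {
            if (std::isupper(static_cast<unsigned char>(c))) {
                if (!out.empty()) { out += '_'; }
                out += static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
            } else {
                out += c;
            }
        }
        return out;
    }

    // Assumed behavior: "begin_end_borders" -> "BeginEndBorders".
    std::string snakeToPascal(const std::string& snakeCase) {
        std::string out;
        bool upperNext = true;
        for (const char c : snakeCase) {
            if (c == '_') { upperNext = true; continue; }
            out += upperNext ? static_cast<char>(std::toupper(static_cast<unsigned char>(c)))
                             : c;
            upperNext = false;
        }
        return out;
    }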
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 113377b33d9827c3428eeb0adc92111f75c22abb..c5054eb2fd2e8bfa5e7fca898f343ce630643dbd 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -48,11 +48,12 @@ public:
      */
     template<class T> T& getAttr(const std::string& name)
     {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
 #ifdef PYBIND
         // If attribute does not exist in C++, it might have been created or modified in Python
         auto it = mAttrs.find(name);
         if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
+            auto itPy = mAttrsPy.find(pascalToSnake(name));
             if (itPy != mAttrsPy.end()) {
                 // Insert the attribute back in C++
                 mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
@@ -65,11 +66,12 @@ public:
 
     template<class T> const T& getAttr(const std::string& name) const
     {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
 #ifdef PYBIND
         // If attribute does not exist in C++, it might have been created or modified in Python
         auto it = mAttrs.find(name);
         if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
+            auto itPy = mAttrsPy.find(pascalToSnake(name));
             if (itPy != mAttrsPy.end()) {
                 // Insert the attribute back in C++
                 mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
@@ -86,6 +88,7 @@ public:
     ///\param value Attribute value
     template<class T> void addAttr(const std::string& name, const T& value)
     {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
         const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
         AIDGE_ASSERT(res.second, "attribute already exists");
 
@@ -93,7 +96,7 @@ public:
         // We cannot handle a Python object if the Python interpreter is not running
         if (Py_IsInitialized()) {
             // Keep a copy of the attribute in py::object that is updated every time
-            mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+            mAttrsPy.emplace(std::make_pair(pascalToSnake(name), py::cast(value)));
         }
 #endif
     }
@@ -129,7 +132,8 @@ public:
 #ifdef PYBIND
     void addAttrPy(const std::string& name, py::object&& value)
     {
-        auto it = mAttrs.find(name);
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
+        auto it = mAttrs.find(snakeToPascal(name));
         AIDGE_ASSERT(it == mAttrs.end(), "attribute already exists");
 
         const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
@@ -138,26 +142,51 @@ public:
 
     void setAttrPy(const std::string& name, py::object&& value) override final
     {
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
         auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
         if (!resPy.second)
             resPy.first->second = std::move(value);
 
         // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
-        mAttrs.erase(name);
+        const std::string pascalName = snakeToPascal(name);
+        mAttrs.erase(pascalName);
+    }
+
+    py::dict dict() const override {
+        py::dict attributes;
+        for (const auto& elt : mAttrsPy) {
+            const std::string snakeName = pascalToSnake(elt.first);
+            attributes[snakeName.c_str()] = elt.second;
+        }
+        return attributes;
+    }
+
+    std::string str() const override {
+        return repr();
     }
+
+    std::string repr() const override {
+        // Format the attributes as a Python-style dict string
+        return fmt::format("AttrDict({})", static_cast<std::string>(py::str(dict())));
+    }
+
 #endif
 
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
     bool hasAttr(const std::string& name) const override final {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
+        return (mAttrs.find(name) != mAttrs.cend());
+    }
+
 #ifdef PYBIND
+    bool hasAttrPy(const std::string& name) const override final {
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
         // Attributes might have been created in Python, the second condition is necessary.
-        return (mAttrs.find(name) != mAttrs.end() || mAttrsPy.find(name) != mAttrsPy.end());
-#else
-        return (mAttrs.find(name) != mAttrs.end());
-#endif
+        return (mAttrs.find(snakeToPascal(name)) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
     }
+#endif
 
     std::string getAttrType(const std::string& name) const override final {
         // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
@@ -195,7 +225,7 @@ public:
      * generic type caster for std::any is not feasible.
      * The strategy here is to keep a copy of each attribute in py::object that is updated every time.
     */
-    py::object getAttrPy(const std::string& name) const override final {
+    inline py::object getAttrPy(const std::string& name) const override final {
         return mAttrsPy.at(name);
     };
 #endif
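With these changes `DynamicAttributes` maintains two synchronized maps: `mAttrs`, keyed in PascalCase for C++, and `mAttrsPy`, keyed in snake_case for Python, with `getAttr()` re-hydrating a C++ value from the Python map whenever the Python side modified it last. A round-trip sketch, assuming a running interpreter (`Py_IsInitialized()` true):

    #include <pybind11/pybind11.h>
    #include "aidge/utils/DynamicAttributes.hpp"
    namespace py = pybind11;

    void demo() {
        Aidge::DynamicAttributes attrs;
        attrs.addAttr<float>("Beta", 0.9f);        // C++ side: PascalCase key, mirrored as "beta"
        attrs.setAttrPy("beta", py::float_(0.5));  // Python side: snake_case key, invalidates "Beta"
        const float b = attrs.getAttr<float>("Beta");  // re-read through mAttrsPy -> 0.5f
        (void)b;
    }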
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index a01f81629c8425f9d860bf1ea03bfe421dbd04fa..a400f8046d07df4ff4493470737f5c4d42945db7 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -14,6 +14,7 @@
 #define AIDGE_LOG_H_
 
 #include <memory>
+#include <vector>
 
 #include <fmt/format.h>
 #include <fmt/ranges.h>
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 6bf59155373cf73d158fce4eb5bda58f7d279e69..8fc88ff79c50751ba7b79662fc9fc430d4ed601d 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -12,11 +12,16 @@
 #ifndef AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
 #define AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
 
-#include <tuple>
+#include <array>
 #include <cassert>
 #include <cstddef>
+#include <string>
+#include <tuple>
 #include <typeinfo>
-#include <array>
+
+#ifdef PYBIND
+#include <fmt/format.h>
+#endif
 
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -149,8 +154,9 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type getAttrType(std::size_t i) const {
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value,
+                std::enable_if_t<(SIZE > 0), bool> = true>
+    constexpr const std::type_info& getAttrType(std::size_t i) const {
         if (i == SIZE-1) {
             return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
         }
@@ -159,8 +165,9 @@ public:
         }
     }
 
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    [[noreturn]] typename std::enable_if<(SIZE == 0), const std::type_info&>::type getAttrType(std::size_t /*i*/) const {
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value,
+                std::enable_if_t<(SIZE == 0), bool> = true>
+    [[noreturn]] const std::type_info& getAttrType(std::size_t /*i*/) const {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
@@ -173,6 +180,7 @@ public:
     //////////////////////////////////////
     // Runtime existance check with name
     bool hasAttr(const std::string& name) const override final {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 return true;
@@ -182,6 +190,20 @@ public:
         return false;
     }
 
+#ifdef PYBIND
+    bool hasAttrPy(const std::string& name) const override final {
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
+        const std::string pascalName = snakeToPascal(name);
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+#endif
+
     // Runtime type access with name
     std::string getAttrType(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
@@ -212,28 +234,36 @@ public:
     static std::set<std::string> staticGetAttrsName() {
         std::set<std::string> attrsName;
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
+            attrsName.insert(pascalToSnake(std::string(EnumStrings<ATTRS_ENUM>::data[i])));
         }
         return attrsName;
     }
 
 
     py::object getAttrPy(const std::string& name) const override {
+        if (name == "__dict__") {
+            return py::none();
+        }
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
+        const std::string pascalName = snakeToPascal(name);
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
-                // Normal accessor would not work has we convert the tuple to a py::object which can be anything
+                // Normal accessor would not work as we convert the tuple to a py::object which can be anything
                 return py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
             }
         }
-
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
+        // if (name == "_ipython_canary_method_should_not_exist_") {
+            // fmt::print("dict call {}", py::str(dict().attr("__getitem__")(name)).cast<std::string>());
+        // }
+        // ipython tries special methods and attributes (e.g "_ipython_canary_method_should_not_exist_") that require to throw
+        throw py::attribute_error(fmt::format("attribute \"{}\" not found.", name));
+        // AIDGE_THROW_OR_ABORT(py::key_error, "attribute \"{}\" not found in Python attribute getter", name);
+        // return py::none();
     }
 
 
     void setAttrPy(const std::string& name, py::object&& value) override final{
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
+        const std::string pascalName = snakeToPascal(name);
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // Cannot update the attribute through a reference, as it would require templating;
                 // use a dirty workaround via py::cast instead
                 auto tmpAttr = py::cast(mAttrs);
@@ -242,8 +276,33 @@ public:
                 return;
             }
         }
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
+        throw py::attribute_error(fmt::format("attribute \"{}\" not found.", name));
+    }
+
+    py::dict dict() const override {
+        py::dict attributes;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            const std::string snakeName = pascalToSnake(EnumStrings<ATTRS_ENUM>::data[i]);
+            // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+            // Normal accessor would not work as we convert the tuple to a py::object which can be anything
+            attributes[snakeName.c_str()] = py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+        }
+        return attributes;
     }
+
+    std::string str() const override {
+        return repr();
+    }
+
+    std::string repr() const override {
+        // Format the attributes as a Python-style dict string
+        return fmt::format("AttrDict({})", static_cast<std::string>(py::str(dict())));
+    }
+
+    std::size_t len() const {
+        return size(EnumStrings<ATTRS_ENUM>::data);
+    }
+
     #endif
 
 private:
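The two `getAttrType(std::size_t)` overloads above also move their SFINAE constraint from the return type into a defaulted non-type template parameter, which keeps the return type readable and makes the mutual exclusion of the overloads explicit. In contrast form:

    // Before: the constraint is buried inside the return type.
    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
    constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type
    getAttrType(std::size_t i) const;

    // After: the constraint is a defaulted bool template parameter.
    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value,
              std::enable_if_t<(SIZE > 0), bool> = true>
    constexpr const std::type_info& getAttrType(std::size_t i) const;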
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index 955b510e6cce6712e4738c0064836dbb733a3c3d..c6595360b17ee08eaa82d483987914adc67b60a8 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+
 #include "aidge/data/Data.hpp"
 
 namespace py = pybind11;
@@ -17,18 +18,18 @@ namespace Aidge {
 
 void init_Data(py::module& m){
     // TODO : extend with more values !
-    py::enum_<DataType>(m, "DataType")
-    .value("Float64", DataType::Float64)
-    .value("Float32", DataType::Float32)
-    .value("Float16", DataType::Float16)
-    .value("Int8", DataType::Int8)
-    .value("Int16", DataType::Int16)
-    .value("Int32", DataType::Int32)
-    .value("Int64", DataType::Int64)
-    .value("UInt8", DataType::UInt8)
-    .value("UInt16", DataType::UInt16)
-    .value("UInt32", DataType::UInt32)
-    .value("UInt64", DataType::UInt64)
+    py::enum_<DataType>(m, "dtype")
+    .value("float64", DataType::Float64)
+    .value("float32", DataType::Float32)
+    .value("float16", DataType::Float16)
+    .value("int8", DataType::Int8)
+    .value("int16", DataType::Int16)
+    .value("int32", DataType::Int32)
+    .value("int64", DataType::Int64)
+    .value("uint8", DataType::UInt8)
+    .value("uint16", DataType::UInt16)
+    .value("uint32", DataType::UInt32)
+    .value("uint64", DataType::UInt64)
     ;
 
     py::class_<Data, std::shared_ptr<Data>>(m,"Data");
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 185e4771295f96cc02adfa3d669ffbb558195ca0..83bb4afeacdd6de181fd6738edad2229736854c8 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -92,10 +92,14 @@ void init_Tensor(py::module& m){
     .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("__str__", [](Tensor& b) {
-        return b.toString();
+        if (b.empty()) {
+            return std::string("{}");
+        } else {
+            return b.toString();
+        }
     })
     .def("__repr__", [](Tensor& b) {
-        return "Tensor(dtype = " + std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]) + ",\n" + b.toString() + ")";
+        return fmt::format("Tensor(dims = {}, dtype = {})", b.dims(), std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]));
     })
     .def("__len__", [](Tensor& b) -> size_t{
         return b.size();
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 4e74be8878eb3ca081fd2d5457e42768f4026be5..2930383817d1555d51b8bddd8eff6402240e905a 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -34,6 +34,8 @@ void init_GraphView(py::module& m) {
           .def("in_view", (bool (GraphView::*)(const std::string&) const) &GraphView::inView)
           .def("root_node", &GraphView::rootNode)
           .def("set_root_node", &GraphView::setRootNode, py::arg("node"))
+          .def("__repr__", &GraphView::repr)
+          .def("__len__", [](const GraphView& g){ return g.getNodes().size(); })
           .def("log_outputs", &GraphView::logOutputs, py::arg("path"))
           .def("get_ordered_inputs", &GraphView::getOrderedInputs)
           .def("get_ordered_outputs", &GraphView::getOrderedOutputs)
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 0153dc7452bfee6c5d8aa4d7c4363b24dc523e0f..06c171214d5df261e5df832179a0fa69420aab7d 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -48,6 +48,8 @@ void init_Node(py::module& m) {
     :rtype: str
     )mydelimiter")
 
+    .def("__repr__", &Node::repr)
+
     .def("add_child",
          (void (Node::*)(std::shared_ptr<Node>, const IOIndex_t, IOIndex_t)) &
                  Node::addChild,
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 966def88033dee8cd6cee06d80dc32114050b430..0587554b722c99d009a248ce963f80cb4fd892ec 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -21,22 +21,23 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
   const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
+
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor>(
         m, pyClassName.c_str(),
         py::multiple_inheritance())
     .def(py::init<const std::array<DimSize_t, DIM> &,
                   const std::array<DimSize_t, DIM> &>(),
             py::arg("kernel_dims"),
-            py::arg("stride_dims"))
+            py::arg("stride_dims") = create_array<DimSize_t,DIM>(1))
     .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
-    .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
+    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 4ec25e02a50330bdf764b598b598836a251d65ea..42e31de2c7c8ba440cd8e479cf9285b398970b42 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -9,9 +9,10 @@
  *
  ********************************************************************************/
 
-#include <pybind11/pybind11.h>
 #include <string>
 
+#include <pybind11/pybind11.h>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
@@ -23,13 +24,13 @@ namespace Aidge {
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance())
         .def(py::init<float, float>(),
             py::arg("epsilon"),
             py::arg("momentum"))
         .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
-        .def_static("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 07bb9f2fc16fcbefb693aeec00c380661f4a6e44..9f02e04a41b20599a6cfe878f53db04c6d5bbe34 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -20,13 +20,12 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Concat(py::module& m) {
-    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
-        .def(py::init<const IOIndex_t, const DimSize_t>(),
+    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
+        .def(py::init<const IOIndex_t, const int>(),
                 py::arg("nb_inputs"),
                 py::arg("axis"))
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
-        .def_static("get_outputs_name", &Concat_Op::getOutputsName)
-        .def_static("attributes_name", &Concat_Op::staticGetAttrsName);
+        .def_static("get_outputs_name", &Concat_Op::getOutputsName);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 4a9f588be98bf6e883840ef798273a648e192c3d..61fb37e788021757fa6c3aced9a5f4c30fb60548 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -27,7 +27,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D");
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
         .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
@@ -43,7 +43,6 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
         .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
-        .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
         ;
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 086df936489dcfffdc0d4e1aae69bea8d44c39d0..080df1832bf92a9db9d26e1fa18b652dc70c2a42 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -28,7 +28,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -39,7 +39,6 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
-  .def_static("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName)
   .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels);
 
   declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 4184af039504466a49e1451f708ac2cd329f328e..9e0d61bc3a4d957e98db39577e120da5fe97ebea 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -15,17 +15,21 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
   py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
-    .def("out_channels", &FC_Op::outChannels);
+    .def("out_channels", &FC_Op::outChannels)
+    .def("__repr__", [](FC_Op& b) {
+        return fmt::format("Operator(type='{}')", b.Type);
+    });
 
   declare_registrable<FC_Op>(m, "FCOp");
 
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 83891624deede4b1f6f6f0c649358e9ed8de0a24..aa831d1cfe92fb720df00bb7d8dd3af7f1c1a668 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -21,7 +21,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Gather(py::module& m) {
-    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
+    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
         .def(py::init<std::int8_t,
                       const std::vector<int64_t>,
                       const std::vector<DimSize_t>>(),
@@ -29,8 +29,7 @@ void init_Gather(py::module& m) {
                 py::arg("indices"),
                 py::arg("gathered_shape"))
         .def_static("get_inputs_name", &Gather_Op::getInputsName)
-        .def_static("get_outputs_name", &Gather_Op::getOutputsName)
-        .def_static("attributes_name", &Gather_Op::staticGetAttrsName);
+        .def_static("get_outputs_name", &Gather_Op::getOutputsName);
 
     declare_registrable<Gather_Op>(m, "GatherOp");
 
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 7078ca3b0e84d7251aadbc6035e348ac9cd72571..6af8fef88e411af0a3ecbe5a771bf7af24de411a 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -11,6 +11,7 @@
 
 #include <stdio.h>
 
+#include <memory>
 #include <string>
 
 #include <pybind11/functional.h>
@@ -27,7 +28,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
         .def(py::init<const std::string&, IOIndex_t, IOIndex_t, IOIndex_t>(),
                 py::arg("type"),
@@ -54,10 +55,11 @@ void init_GenericOperator(py::module& m) {
             );
             if (kwargs){
                 std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
+                std::shared_ptr<DynamicAttributes> attr = std::dynamic_pointer_cast<DynamicAttributes>(gop->attributes());
                 for (auto item : kwargs) {
                     std::string key = py::cast<std::string>(item.first);
                     py::object value = py::reinterpret_borrow<py::object>(item.second);
-                    gop->setAttrPy(key, std::move(value));
+                    attr->setAttrPy(key, std::move(value));
                 }
             }
             return genericNode;
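
The kwargs path now goes through the operator's `attributes()` accessor, cast to `DynamicAttributes`, instead of calling `setAttrPy` directly on the operator. A sketch of the intended Python usage, assuming the factory's positional arguments are (type, nb_data, nb_param, nb_out) as in the `py::init` above:

    import aidge_core

    # extra keyword arguments become dynamic attributes on the wrapped operator
    node = aidge_core.GenericOperator("MyOp", 1, 0, 1, name="my_op", my_attr=42)
    op = node.get_operator()
    assert op.attr.has_attr("my_attr") and op.attr.get_attr("my_attr") == 42
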
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index b859b3be5b3dd2606d227a3ca26bd1b4eb8e75a9..f46106fb3fb168631c9681d90bda857183c9bc04 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -19,11 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
         .def(py::init<float>(), py::arg("negative_slope"))
         .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
-        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
-        .def_static("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
+        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName);
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index befba918dff37e7d47a76c0c71bf48008244c2d0..2a850cd7bfe5cca21ea1ca54b5e9ad86b880bcc2 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -27,7 +27,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -37,8 +37,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("ceil_mode"))
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
-  .def_static("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
+  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 9c98c078a7e8364855b677b17cb4bb33cdea4339..2b2f30f14931fd041bfb4ec1a712e5c9419fdf22 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -35,6 +35,7 @@ void init_Operator(py::module& m){
         .value("OptionalParam", InputCategory::OptionalParam);
 
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
+    .def("__repr__", &Operator::repr)
     .def("backend", &Operator::backend)
     .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
@@ -60,6 +61,7 @@ void init_Operator(py::module& m){
     .def("get_impl", &Operator::getImpl)
     .def("get_hook", &Operator::getHook)
     .def("add_hook", &Operator::addHook)
+    .def_property_readonly("attr", &Operator::attributes)
     ;
 }
 }
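
Both additions make operators introspectable from Python without casting to a concrete binding class: `repr(op)` delegates to `Operator::repr`, and `op.attr` hands back the shared `Attributes` object. A short sketch:

    op = node.get_operator()
    print(repr(op))   # formatted by Operator::repr
    print(op.attr)    # Attributes::repr, bound as __repr__ in pybind_Attributes.cpp below
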
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 88d5fb4612851551dc844f26373719f13caa7962..3df203ed52967e3dbc393769276015a7fe0e016f 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -26,7 +26,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
   const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
-  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
+  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
@@ -37,7 +37,6 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
-    .def_static("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
     ;
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index d8873636d029435706cfb9766262ae0b8409d8a5..0c3b3f38803735d2df632496382e86a0c9f2735d 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -19,7 +19,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Pop(py::module& m) {
-    py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor, Attributes>(m, "PopOp", py::multiple_inheritance())
+    py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor>(m, "PopOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Pop_Op::getInputsName)
     .def_static("get_outputs_name", &Pop_Op::getOutputsName);
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 71347554fdc9cd937b1f14df16e370db2f77a267..30279dc477a0badbd5dc361ef7b5d071fa7b8cbc 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -31,15 +31,14 @@ void declare_Producer(py::module &m) {
 
 
 void init_Producer(py::module &m) {
-    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>(
+    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, OperatorTensor>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
-    .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
-    .def("dims", &Producer_Op::dims)
-    .def_static("get_inputs_name", &Producer_Op::getInputsName)
-    .def_static("get_outputs_name", &Producer_Op::getOutputsName)
-    .def_static("attributes_name", &Producer_Op::staticGetAttrsName);
+        .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
+        .def("dims", &Producer_Op::dims)
+        .def_static("get_inputs_name", &Producer_Op::getInputsName)
+        .def_static("get_outputs_name", &Producer_Op::getOutputsName);
 
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(
                                         const std::shared_ptr<Tensor>,
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 00201c9bdf4ecd7ad76202c2fe78180317b736dd..3023c077e2f3695902ca76dfa21831749f0ca82e 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -26,12 +26,11 @@ namespace Aidge {
 
 void declare_ReduceMeanOp(py::module &m) {
   const std::string pyClassName("ReduceMeanOp");
-  py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, Attributes, OperatorTensor>(
+  py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, OperatorTensor>(
     m, pyClassName.c_str(), py::multiple_inheritance())
     .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
-    .def_static("attributes_name", &ReduceMean_Op::staticGetAttrsName)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index 5a07de2f00399b761c0652e5dcdccdc0d49938de..89d93134ac2f590bcb067aa6936081c16fc1e2a3 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -19,10 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Reshape(py::module& m) {
-    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Attributes, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
-    .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
-    .def_static("get_inputs_name", &Reshape_Op::getInputsName)
-    .def_static("get_outputs_name", &Reshape_Op::getOutputsName);
+    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
+        .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
+        .def_static("get_inputs_name", &Reshape_Op::getInputsName)
+        .def_static("get_outputs_name", &Reshape_Op::getOutputsName);
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index 0660cdb003ed4d5946f54786c0a51d9051d83d5a..31e6c0b08194fbb8b6ec2270e8127a2f838ba78f 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -21,11 +21,10 @@ namespace Aidge {
 
 void init_Scaling(py::module& m)
 {
-    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, Attributes, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
-    .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
-    .def_static("get_inputs_name", &Scaling_Op::getInputsName)
-    .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
-    .def_static("attributes_name", &Scaling_Op::staticGetAttrsName);
+    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
+        .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
+        .def_static("get_inputs_name", &Scaling_Op::getInputsName)
+        .def_static("get_outputs_name", &Scaling_Op::getOutputsName);
     declare_registrable<Scaling_Op>(m, "ScalingOp");
     m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index dbae1d95d81ef65d27167bcd0774366dcc41b325..4e1d4203e48f714746587c9f209b4d28bfecb439 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -9,11 +9,10 @@
  *
  ********************************************************************************/
 
+#include <cstdint>  // std::int64_t
+
 #include <pybind11/pybind11.h>
-#include <string>
-#include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 
@@ -21,14 +20,13 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Shape(py::module& m) {
-    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, Attributes, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance())
-        .def(py::init<std::int64_t,
-                      std::int64_t>(),
+    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance())
+        .def(py::init<const std::int64_t,
+                      const std::int64_t>(),
                 py::arg("start"),
                 py::arg("end"))
         .def_static("get_inputs_name", &Shape_Op::getInputsName)
-        .def_static("get_outputs_name", &Shape_Op::getOutputsName)
-        .def_static("attributes_name", &Shape_Op::staticGetAttrsName);
+        .def_static("get_outputs_name", &Shape_Op::getOutputsName);
 
     declare_registrable<Shape_Op>(m, "ShapeOp");
 
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index becb6f35fb7413c042f6a902aadb602e4547ee01..f27e469d84c463ec48d1f9484807a8c93b7a5f4d 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -20,11 +20,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
-    .def(py::init<std::size_t>(), py::arg("axis"))
-    .def_static("get_inputs_name", &Softmax_Op::getInputsName)
-    .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
-    .def_static("attributes_name", &Softmax_Op::staticGetAttrsName);
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
+        .def(py::init<std::size_t>(), py::arg("axis"))
+        .def_static("get_inputs_name", &Softmax_Op::getInputsName)
+        .def_static("get_outputs_name", &Softmax_Op::getOutputsName);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index f3c000291dfca954bbed93b9400ac0bd8df8025b..c0c3ad617bef3eda3e283667944ac423cd10a622 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -27,12 +27,11 @@ namespace Aidge {
 
 void declare_Transpose(py::module &m) {
   const std::string pyClassName("TransposeOp");
-  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, Attributes, OperatorTensor>(
+  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, OperatorTensor>(
     m, "TransposeOp", py::multiple_inheritance())
-  .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
-  .def_static("get_inputs_name", &Transpose_Op::getInputsName)
-  .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
-  .def_static("attributes_name", &Transpose_Op::staticGetAttrsName);
+    .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
+    .def_static("get_inputs_name", &Transpose_Op::getInputsName)
+    .def_static("get_outputs_name", &Transpose_Op::getOutputsName);
   declare_registrable<Transpose_Op>(m, pyClassName);
   m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
 }
diff --git a/python_binding/utils/pybind_Attributes.cpp b/python_binding/utils/pybind_Attributes.cpp
index bfce891176822a3b1c07b1ded0c46c9c94a43c0a..7f5dde63c4835eb694d5fd2d571d7c9c1fd5a9ac 100644
--- a/python_binding/utils/pybind_Attributes.cpp
+++ b/python_binding/utils/pybind_Attributes.cpp
@@ -1,31 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
+
 DynamicAttributes test_DynamicAttributes_binding() {
     DynamicAttributes attrs;
-    attrs.addAttr<int>("a", 42);
-    attrs.addAttr<std::string>("b", "test");
-    attrs.addAttr<std::vector<bool>>("c", {true, false, true});
+    attrs.addAttr<int>("A", 42);
+    attrs.addAttr<std::string>("B", "test");
+    attrs.addAttr<std::vector<bool>>("C", {true, false, true});
     return attrs;
 }
 
 double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
-    return attrs.getAttr<double>("d");
+    return attrs.getAttr<double>("D");
 }
 
 void init_Attributes(py::module& m){
     py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
-    .def("has_attr", &Attributes::hasAttr, py::arg("name"))
-    .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
-    .def("get_attrs_name", &Attributes::getAttrsName)
+    .def("has_attr", &Attributes::hasAttrPy, py::arg("name"))
     .def("get_attr", &Attributes::getAttrPy, py::arg("name"))
     .def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
     .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
-    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"));
+    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("dict", &Attributes::dict)
+    .def("__str__", &Attributes::str)
+    .def("__repr__", &Attributes::repr);
 
     py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
     .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
@@ -35,5 +51,4 @@ void init_Attributes(py::module& m){
     m.def("test_DynamicAttributes_binding_check", &test_DynamicAttributes_binding_check, py::arg("attrs"));
 }
 
-}
-
+} // namespace Aidge
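
Taken together, the new `dict`, `__str__` and `__repr__` bindings, plus the attribute-style access, give `Attributes` a dict-like feel from Python. A sketch exercising them through the test helpers bound above (attribute names "A", "B", "C" and "D" as in those helpers):

    import aidge_core

    attrs = aidge_core.test_DynamicAttributes_binding()
    assert attrs.has_attr("A") and attrs.A == 42   # __getattr__ -> getAttrPy
    attrs.D = 3.14                                 # __setattr__ -> setAttrPy
    print(attrs.dict())                            # plain Python dict of all attributes
    assert aidge_core.test_DynamicAttributes_binding_check(attrs) == 3.14
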
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 25f1a01877835be3d89ecb969019dd41dfb2753e..5a11aa20e03bef274f784788dee1ef047cafba42 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -404,14 +404,23 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
 }
 
 bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
+    // check given dims against already connected input Tensors, and create dummy inputs elsewhere to propagate dimensions
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
     if (!dims.empty()){
       AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of given dimensions ({}) and graph inputs ({})", dims.size(), mInputNodes.size());
       for (std::size_t i = 0; i < dims.size(); ++i){
-        auto tensor = std::make_shared<Tensor>(dims[i]);
-        mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+        const auto& currentTensorPtr =
+            std::dynamic_pointer_cast<OperatorTensor>(mInputNodes[i].first->getOperator())->getInput(mInputNodes[i].second);
+        if (currentTensorPtr) { // tensor detected
+            AIDGE_ASSERT(currentTensorPtr->dims() == dims[i], "Tensor of unexpected size provided.")
+        } else {
+            auto tensor = std::make_shared<Tensor>(dims[i]);
+            mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+        }
       }
     }
 
@@ -1055,6 +1062,10 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
                       for (const auto& child : outputChildren[i]) {
                         inputParents[i].first -> addChild(child.first, inputParents[i].second, child.second);
                       }
+                    } else {
+                      for (const auto& child : outputChildren[i]) {
+                        child.first->getOperator()->resetInput(child.second);
+                      }
                     }
                 }
             }
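
forwardDims therefore no longer overwrites a Tensor already plugged into a graph input: given dims must match the connected Tensor's dims, and dummy Tensors are created only for unconnected inputs. A hedged Python sketch, assuming the usual `forward_dims` binding of GraphView::forwardDims:

    # g: an aidge_core.GraphView with a single free input.
    # The dims given here must agree with any Tensor already set on that input,
    # otherwise the new assert in GraphView::forwardDims fires.
    ok = g.forward_dims([[1, 3, 224, 224]])
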
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 82d3eec9dfd55f03c863dcc47442d011f07a3955..53ffb93269e79c0ba940f1fb0d3d94cb494ad8ce 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
 
 template <Aidge::DimIdx_t DIM>
-Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): OperatorTensor(op), Attributes_(op) {
+Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
     if (op.mImpl) {
         SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
     } else {
@@ -43,11 +46,11 @@ bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         outputDims[0] = inputDims[0];
         outputDims[1] = inputDims[1];
 
-        for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                         std::floor(static_cast<float>(inputDims[dim+2] -
-                                                                this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
-                                        static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
+                                                            mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
+                                        static_cast<float>(mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
         }
         getOutput(0)->resize(outputDims);
         return true;
@@ -85,10 +88,10 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
 
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
-            inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+            inputIdxDims[2+i] *= mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
         std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
         res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 5fab77d5a389313fd5423302d3d6be12e6c7c4be..98e5c2da20fc35e18d4fd69a79cf1d87ec9d60ca 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm";
 
 template <Aidge::DimIdx_t DIM>
-Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTensor(op), Attributes_(op) {
+Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
     if (op.mImpl) {
         SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
     } else {
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index f1c8e25e17c80d58d444a1ddddbaa428b2fc4c41..8df153a67d2214e4435d9fa0aac6e74d53e11b12 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -27,6 +27,15 @@
 
 const std::string Aidge::Cast_Op::Type = "Cast";
 
+Aidge::Cast_Op::Cast_Op(const DataType targetType)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<CastAttr::TargetType>(targetType)))
+{
+    mImpl = std::make_shared<Cast_OpImpl>(*this);
+    mOutputs[0]->setDataType(targetType);
+}
+
 void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Cast_Op>::exists({name})) {
         SET_IMPL_MACRO(Cast_Op, *this, name);
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 507a5e899ac18d6932488ebc981a7a88dcd676d4..bf4bbb85be606fc857bf8d771b9ce211ca8e858e 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -20,7 +20,7 @@
 
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
-    const DimSize_t axis = op.template getAttr<DimSize_t>("Axis");
+    const DimSize_t axis = op.axis();
 
     assert(op.getInput(0) && "missing input in Concat operator");
     DataType datatypeFirstInput = op.getInput(0)->dataType();
@@ -60,31 +60,46 @@
 const std::string Aidge::Concat_Op::Type = "Concat";
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (inputsAssociated()) {
-        AIDGE_ASSERT(getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims(), "Concat: Axis ({}) out of range ({})",
-            getAttr<ConcatAttr::Axis>(), getInput(0)->nbDims());
+    if (!inputsAssociated()) {
+        return false;
+    }
+    const std::size_t nbDimsInput0 = getInput(0)->nbDims();
+    if (nbDimsInput0 == 0) {
+        return false; // input dims not available yet
+    }
+    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+        if (getInput(i)->nbDims() == 0) {
+            return false;
+        }
+        AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
+            "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
+            i, type(), nbDimsInput0, getInput(i)->nbDims());
+    }
+    // Check validity of attributes with inputs
+    // Axis
+    std::int32_t axis = mAttributes->template getAttr<ConcatAttr::Axis>();
+    axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
+    AIDGE_ASSERT((axis >= 0) && (axis < static_cast<std::int32_t>(nbDimsInput0)),
+                "'Axis' attribute not compatible with provided inputs.");
+    const std::size_t axis_u64 = static_cast<std::size_t>(axis);
 
-        auto outputDims =  getInput(0)->dims();
-        const auto firstInputNbDims = getInput(0) -> nbDims();
-        for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-            if (getInput(i)->nbDims() == firstInputNbDims) {
-                for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
-                    if (dim == getAttr<ConcatAttr::Axis>()) {
-                        outputDims[dim] += getInput(i)->dims()[dim];
-                    }
-                    else {
-                        AIDGE_ASSERT(getInput(i)->dims()[dim] == outputDims[dim], "Concat: input #{} dim #{} ({}) must match value {}",
-                            i, dim, getInput(i)->dims()[dim], outputDims[dim]);
-                    }
-                }
+    // Check validity of inputs
+    auto outputDims =  getInput(0)->dims();
+    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+        for (DimSize_t dim = 0; dim < nbDimsInput0; ++dim) {
+            if (dim == axis_u64) {
+                outputDims[axis_u64] += getInput(i)->dims()[axis_u64];
+            }
+            else {
+                AIDGE_ASSERT(getInput(i)->dims()[dim] == outputDims[dim],
+                    "Incomatible dimensions between input 0 {} and input {} {}",
+                    getInput(0)->dims(), i, getInput(i)->dims());
             }
         }
-
-        getOutput(0)->resize(outputDims);
-        return true;
     }
 
-    return false;
+    getOutput(0)->resize(outputDims);
+    return true;
 }
 
 void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
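
The axis handling above now accepts negative values, normalised against the first input's rank before use. A pure-Python mirror of that normalisation:

    # mirror of the Concat_Op::forwardDims axis check: negative axes count
    # from the end and must land inside [0, ndim) after normalisation
    def normalize_axis(axis: int, ndim: int) -> int:
        axis = axis + ndim if axis < 0 else axis
        assert 0 <= axis < ndim, "'Axis' attribute not compatible with provided inputs."
        return axis

    assert normalize_axis(-1, 4) == 3
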
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index c17a2830ccece2f7a0b4960e68002f089410a0b4..a33af78779971e77da4f4e910b89b9263a1af5d6 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -29,7 +29,7 @@ const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
@@ -56,14 +56,14 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         std::array<DimSize_t, DIM + 2> outputDims{};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
-        for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
                                             1;
 
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
+                            static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim])));
         }
 
         outputDims[1] = outChannels();
@@ -106,18 +106,18 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         // same output value, every input channel is used
         std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
+            weightDims.push_back(mAttributes->template getAttr<ConvAttr::KernelDims>()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index acd845909d399389a2113b63806e1bbb94b4fb89..342fd86195d5c2e85a63d990c4ebbb75e7f50a6b 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -30,7 +30,7 @@ const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
@@ -57,14 +57,14 @@ bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         std::array<DimSize_t, DIM + 2> outputDims = {};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
-        for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
                                             1;
 
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
+                            static_cast<float>(mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
         }
 
         outputDims[1] = inputDims[1];
@@ -106,17 +106,17 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         std::vector<DimSize_t> weightDims{outputDims[1], 1};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+            weightDims.push_back(mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index fa5e7bf927177a61a4a90f40ff2d15d625c1f4ef..c28a0587a755ef0a910ec5bfdeb9caa2f1edc216 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -22,9 +22,8 @@
 
 void Aidge::Gather_OpImpl::forward() {
     const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
-    const auto axis = op.template getAttr<std::int8_t>("Axis");
 
-    const std::size_t axisIdx = static_cast<std::size_t>(axis) + (axis >= 0 ? 0 : op.getInput(0)->dims().size());
+    const std::size_t axisIdx = static_cast<std::size_t>(op.axis()) + (op.axis() >= 0 ? 0 : op.getInput(0)->dims().size());
 
     std::size_t postAxisElems = 1;
     for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
@@ -38,11 +37,11 @@ void Aidge::Gather_OpImpl::forward() {
     std::size_t outputOffset = 0;
     for (std::size_t i=0; i<preAxisElems; ++i)
     {
-        for(std::size_t j=0; j<op.template getAttr<std::vector<int64_t>>("Indices").size(); ++j)
+        for(std::size_t j = 0; j < op.indices().size(); ++j)
         {
-            const std::size_t idx = op.template getAttr<std::vector<int64_t>>("Indices")[j] >= 0 ?
-                                        static_cast<std::size_t>(op.template getAttr<std::vector<int64_t>>("Indices")[j]) :
-                                        static_cast<std::size_t>(op.template getAttr<std::vector<int64_t>>("Indices")[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
+            const std::size_t idx = op.indices()[j] >= 0 ?
+                                        static_cast<std::size_t>(op.indices()[j]) :
+                                        static_cast<std::size_t>(op.indices()[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
             op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
             outputOffset += postAxisElems;
         }
@@ -64,7 +63,7 @@ bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
     if (inputsAssociated()) {
         // Copy optional input #1, if present, to attribute Indices
         if (getInput(1)) {
-            if (!this->template getAttr<GatherAttr::Indices>().empty()) {
+            if (!this->indices().empty()) {
                 Log::notice("Gather_Op: ignoring non-empty Indices attribute because input#1 takes precedence");
             }
 
@@ -74,34 +73,34 @@ bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
             }
 
             std::shared_ptr<Tensor> fallback;
-            this->template getAttr<GatherAttr::GatheredShape>() = getInput(1)->dims();
-            this->template getAttr<GatherAttr::Indices>().clear(); // If both are provided input would override attrs
-            this->template getAttr<GatherAttr::Indices>().reserve(getInput(1)->size());
+            this->gatheredShape() = getInput(1)->dims();
+            this->indices().clear(); // If both are provided input would override attrs
+            this->indices().reserve(getInput(1)->size());
             const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
             std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
                         indices.size(),
-                        std::back_inserter(this->template getAttr<GatherAttr::Indices>()));
+                        std::back_inserter(this->indices()));
         }
 
-        AIDGE_ASSERT(!this->template getAttr<GatherAttr::Indices>().empty(), "Missing input#1 or Indices attribute");
+        AIDGE_ASSERT(!this->indices().empty(), "Missing input#1 or Indices attribute");
 
         // Compute output dims
         std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-        std::int8_t axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?
-                                this->template getAttr<GatherAttr::Axis>():
-                                this->template getAttr<GatherAttr::Axis>()+outDims.size();
+        const std::int8_t axisIdx = (this->axis() >= 0) ?
+                                        this->axis() :
+                                        this->axis() + static_cast<std::int8_t>(outDims.size());
         outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
-        if( !this->template getAttr<GatherAttr::GatheredShape>().empty())
+        if( !this->gatheredShape().empty())
         {
             outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
-                            this->template getAttr<GatherAttr::GatheredShape>().begin(),
-                            this->template getAttr<GatherAttr::GatheredShape>().end());
+                            this->gatheredShape().begin(),
+                            this->gatheredShape().end());
         }
         mOutputs[0]->resize(outDims);
         return true;
     }
-    
+
     return false;
 }
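
The accessor-based rewrite above leaves the copy arithmetic unchanged; for reference, a pure-Python mirror of Gather_OpImpl::forward on a flat buffer, where dims, axis and indices stand in for op.getInput(0)->dims(), op.axis() and op.indices():

    def gather_flat(data, dims, axis, indices):
        axis = axis + len(dims) if axis < 0 else axis
        post = 1                       # elements after the gathered axis
        for d in dims[axis + 1:]:
            post *= d
        pre = 1                        # elements before the gathered axis
        for d in dims[:axis]:
            pre *= d
        out = []
        for i in range(pre):
            for idx in indices:
                idx = idx + dims[axis] if idx < 0 else idx   # negative indices wrap
                base = (i * dims[axis] + idx) * post
                out.extend(data[base:base + post])
        return out

    # gathering indices [2, 0] along axis 1 of a 2x3 buffer
    assert gather_flat([0, 1, 2, 3, 4, 5], [2, 3], 1, [2, 0]) == [2, 0, 5, 3]
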
 
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 07d54aaf8505bdd95849f5972b7293e949dbe72f..adf79b5c69e991ad7979184c313448e4288a8ecb 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -24,14 +24,13 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
     Aidge::IOIndex_t inputIdx) const
 {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
 
-    if (scheduleStep == 0 && inputIdx == 0) {
+    if (op.scheduleStep() == 0 && inputIdx == 0) {
         // No data input is required for the initial step.
         // Initialization data is required however.
         return Elts_t::NoneElts();
     }
-    else if (scheduleStep > 0 && inputIdx == 1) {
+    else if (op.scheduleStep() > 0 && inputIdx == 1) {
         // No initialization data is required after the initial step.
         return Elts_t::NoneElts();
     }
@@ -45,10 +44,8 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t o
     assert(mOp.getRawOutput(outputIdx) && "requires valid output");
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
 
-    if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) {
+    if ((op.endStep() > 0) && (outputIdx == 1) && (op.scheduleStep() >= op.endStep())) {
         return Elts_t::NoneElts();
     }
     else {
@@ -60,18 +57,15 @@ void Aidge::Memorize_OpImpl::updateConsummerProducer() {
     OperatorImpl::updateConsummerProducer();
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded");
+    AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
 }
 
 void Aidge::Memorize_OpImpl::forward() {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded");
 
-    if (forwardStep == 0) {
+    AIDGE_ASSERT((op.endStep() == 0) || (op.forwardStep() <= op.endStep()), "cannot forward anymore, number of cycles exceeded");
+
+    if (op.forwardStep() == 0) {
         op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
     }
     else {
@@ -83,8 +77,8 @@ const std::string Aidge::Memorize_Op::Type = "Memorize";
 
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
-    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ScheduleStep>();
+    mAttributes->template getAttr<MemorizeAttr::ForwardStep>() = 0;
 }
 
 bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
@@ -128,6 +122,6 @@ void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
 
 void Aidge::Memorize_Op::forward() {
     Operator::forward();
-    ++this->template getAttr<MemorizeAttr::ForwardStep>();
-    this->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
+    mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index e508712b0a60a9c09530a31257d9e0b76486d3cb..5df90020a43ad6cffebcd2345c075837f11462b1 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -9,7 +9,6 @@
  *
  ********************************************************************************/
 
-#include <cassert>
 #include <memory>
 
 #include "aidge/operator/OperatorTensor.hpp"
@@ -50,6 +49,11 @@ void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, cons
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
 }
 
+void Aidge::OperatorTensor::resetInput(const Aidge::IOIndex_t inputIdx) {
+    AIDGE_ASSERT(inputIdx < nbInputs(), "Input idx out of range.");
+    mInputs[inputIdx] = nullptr;
+}
+
 void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     if (getInput(inputIdx)) {
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index afdc1b2ee27793ece078f8ca541d569dbf930816..2fcc46a460ffd7c7f6746dfcd108acbaafe912de 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -30,9 +30,9 @@ Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputI
 
 void Aidge::Pop_OpImpl::forward() {
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+
     assert(op.getInput(0) && "missing input #0");
-    const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>();
-    *op.getOutput(0) = op.getInput(0)->extract({forwardStep});
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
 }
 
 const std::string Aidge::Pop_Op::Type = "Pop";
@@ -50,7 +50,7 @@ bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
 
 void Aidge::Pop_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    this->template getAttr<PopAttr::ForwardStep>() = 0;
+    mAttributes->template getAttr<PopAttr::ForwardStep>() = 0;
 }
 
 void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -65,5 +65,5 @@ void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
 
 void Aidge::Pop_Op::forward() {
     Operator::forward();
-    ++this->template getAttr<PopAttr::ForwardStep>();
+    ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 1e09919031c07af8866c45bc11f8eef8045bbbee..bdb69452ec54fb635d0cbc299336071295f37ae1 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -29,7 +29,8 @@ const std::string Aidge::Producer_Op::Type = "Producer";
 
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
     : OperatorTensor(Type, {}, 1),
-      Attributes_(attr<ProdAttr::Constant>(constant))
+      mAttributes(std::make_shared<Attributes_>(
+        attr<ProdAttr::Constant>(constant)))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
@@ -47,7 +48,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
  */
 Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 6b269d91e7783b980dc634a63378dda2f9d858fd..96f2f855f46275e167acb1300434f8bcdbdd7d3e 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -29,7 +29,7 @@ const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // make Axes attribute positive
-        std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceMeanAttr::Axes>();
         std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
             if (val < 0)
                 val+=static_cast<std::int32_t>(getInput(0)->nbDims());
@@ -38,7 +38,7 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
-        if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+        if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
             std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
         else {
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 259288cc14998b4065697a4cad45ee8838b1d8f5..1838c008a6b83548b6a5a80af0363e2cf239b649 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -43,7 +43,7 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
     if (inputsAssociated()) {
         // Copy optional input #1, if present, to attribute Shape
         if (getInput(1)) {
-            if (!this->template getAttr<ReshapeAttr::Shape>().empty()) {
+            if (!this->shape().empty()) {
                 Log::notice("Reshape_Op: ignoring non-empty Shape attribute because input#1 takes precedence");
             }
 
@@ -53,15 +53,15 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
             }
 
             std::shared_ptr<Tensor> fallback;
-            this->template getAttr<ReshapeAttr::Shape>().clear(); // If both are provided input would override attrs
-            this->template getAttr<ReshapeAttr::Shape>().reserve(getInput(1)->size());
+            this->shape().clear(); // If both are provided input would override attrs
+            this->shape().reserve(getInput(1)->size());
             const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
             std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
                         shape.size(),
-                        std::back_inserter(this->template getAttr<ReshapeAttr::Shape>()));
+                        std::back_inserter(this->shape()));
         }
 
-        AIDGE_ASSERT(!this->template getAttr<ReshapeAttr::Shape>().empty(), "Missing input#1 or Shape attribute");
+        AIDGE_ASSERT(!this->shape().empty(), "Missing input#1 or Shape attribute");
 
         // Compute output dims
         std::vector<DimSize_t> outDims;
@@ -69,9 +69,9 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
         bool foundNegativeDimension = false;
         std::size_t outSize = 1;
         DimIdx_t negativeIndex = 0;
-        for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
+        for(std::size_t i = 0; i < this->shape().size(); ++i)
         {
-            int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
+            int64_t dimSize = this->shape()[i];
             if (dimSize < 0) {
                 if (foundNegativeDimension) {
                     AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
@@ -80,7 +80,7 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
                 dimSize = 1;
                 negativeIndex = static_cast<DimIdx_t>(i);
             }
-            else if (dimSize == 0 && !this->template getAttr<ReshapeAttr::AllowZero>())
+            else if (dimSize == 0 && !mAttributes->template getAttr<ReshapeAttr::AllowZero>())
             {
                 dimSize = getInput(0) -> dims()[i];
             }
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index d99da0aa1cb50c5e9fa719a1ece2f2ddf5a243e8..8166712e1e5fd967bb9328e95ecf8c5388636ba7 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -21,10 +21,10 @@
 
 void Aidge::Shape_OpImpl::forward() {
     const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
-    const auto start = op.template getAttr<std::int64_t>("Start");
-    const auto end = op.template getAttr<std::int64_t>("End");
+    const auto start = op.start();
+    const auto end = op.end();
 
-    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(), 
+    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(),
                                                    start),
                                          DataType::UInt64,
                                          end - start + 1);
@@ -34,13 +34,13 @@ const std::string Aidge::Shape_Op::Type = "Shape";
 
 bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
-        if (this->template getAttr<std::int64_t>("Start") < 0)
-            this->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
-        if (this->template getAttr<std::int64_t>("End") < 0)
-            this->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
+        if (mAttributes->template getAttr<std::int64_t>("Start") < 0)
+            mAttributes->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
+        if (mAttributes->template getAttr<std::int64_t>("End") < 0)
+            mAttributes->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
 
-        const auto start = this->template getAttr<std::int64_t>("Start");
-        const auto end = this->template getAttr<std::int64_t>("End");
+        const auto start = mAttributes->template getAttr<std::int64_t>("Start");
+        const auto end = mAttributes->template getAttr<std::int64_t>("End");
         const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
         const DimSize_t roi = end - start + 1;
 
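
In the hunk above, negative `Start`/`End` bounds are shifted into range by adding the rank, and both bounds are inclusive (`roi = end - start + 1`). A standalone sketch (not the Aidge API):

```cpp
#include <cstdint>

// Negative bounds count from the end; both bounds are inclusive.
struct ShapeRange { std::int64_t start, end, size; };

ShapeRange shapeRange(std::int64_t start, std::int64_t end, std::int64_t nbDims) {
    if (start < 0) start += nbDims;
    if (end < 0)   end   += nbDims;
    return {start, end, end - start + 1};
}
// e.g. for a 4-D input, shapeRange(1, -1, 4) yields {1, 3, 3}: dims 1 through 3.
```
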
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index e3040ba26ec14ea32f64590e42059e472e43deae..f15a150f36402d0e3b7ae059084b7dd0b556919d 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -46,7 +46,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
         std::shared_ptr<Tensor> fallback;
         // Copy optional input #1, if present, to attribute Starts
         if (getInput(1)) {
-            if (!this->template getAttr<SliceAttr::Starts>().empty()) {
+            if (!this->starts().empty()) {
                 Log::notice("Slice_Op: ignoring non-empty Starts attribute because input#1 takes precedence");
             }
 
@@ -55,19 +55,19 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
                 return false;
             }
 
-            this->template getAttr<SliceAttr::Starts>().clear(); // If both are provided input would override attrs
-            this->template getAttr<SliceAttr::Starts>().reserve(getInput(1)->size());
+            this->starts().clear(); // If both are provided, the input overrides the attribute
+            this->starts().reserve(getInput(1)->size());
             const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
             std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
                         starts.size(),
-                        std::back_inserter(this->template getAttr<SliceAttr::Starts>()));
+                        std::back_inserter(this->starts()));
         }
 
-        AIDGE_ASSERT(!this->template getAttr<SliceAttr::Starts>().empty(), "Missing input#1 or Starts attribute");
+        AIDGE_ASSERT(!this->starts().empty(), "Missing input#1 or Starts attribute");
 
         // Copy optional input #2, if present, to attribute Ends
         if (getInput(2)) {
-            if (!this->template getAttr<SliceAttr::Ends>().empty()) {
+            if (!this->ends().empty()) {
                 Log::notice("Slice_Op: ignoring non-empty Ends attribute because input#2 takes precedence");
             }
 
@@ -76,19 +76,19 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
                 return false;
             }
 
-            this->template getAttr<SliceAttr::Ends>().clear(); // If both are provided input would override attrs
-            this->template getAttr<SliceAttr::Ends>().reserve(getInput(2)->size());
+            this->ends().clear(); // If both are provided, the input overrides the attribute
+            this->ends().reserve(getInput(2)->size());
             const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
             std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
                         ends.size(),
-                        std::back_inserter(this->template getAttr<SliceAttr::Ends>()));
+                        std::back_inserter(this->ends()));
         }
 
-        AIDGE_ASSERT(!this->template getAttr<SliceAttr::Ends>().empty(), "Missing input#2 or Ends attribute");
+        AIDGE_ASSERT(!this->ends().empty(), "Missing input#2 or Ends attribute");
 
         // Copy optional input #3, if present, to attribute Axes
         if (getInput(3)) {
-            if (!this->template getAttr<SliceAttr::Axes>().empty()) {
+            if (!this->axes().empty()) {
                 Log::notice("Slice_Op: ignoring non-empty Axes attribute because input#3 takes precedence");
             }
 
@@ -97,19 +97,19 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
                 return false;
             }
 
-            this->template getAttr<SliceAttr::Axes>().clear(); // If both are provided input would override attrs
-            this->template getAttr<SliceAttr::Axes>().reserve(getInput(3)->size());
+            this->axes().clear(); // If both are provided, the input overrides the attribute
+            this->axes().reserve(getInput(3)->size());
             const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
             std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
                         axes.size(),
-                        std::back_inserter(this->template getAttr<SliceAttr::Axes>()));
+                        std::back_inserter(this->axes()));
         }
 
-        AIDGE_ASSERT(!this->template getAttr<SliceAttr::Axes>().empty(), "Missing input#3 or Axes attribute");
+        AIDGE_ASSERT(!this->axes().empty(), "Missing input#3 or Axes attribute");
 
         // Copy optional input #4, if present, to attribute Steps
         if (getInput(4)) {
-            if (!this->template getAttr<SliceAttr::Steps>().empty()) {
+            if (!this->steps().empty()) {
                 Log::notice("Slice_Op: ignoring non-empty Steps attribute because input#4 takes precedence");
             }
 
@@ -118,34 +118,34 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
                 return false;
             }
 
-            this->template getAttr<SliceAttr::Steps>().clear(); // If both are provided input would override attrs
-            this->template getAttr<SliceAttr::Steps>().reserve(getInput(4)->size());
+            this->steps().clear(); // If both are provided, the input overrides the attribute
+            this->steps().reserve(getInput(4)->size());
             const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
             std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
                         steps.size(),
-                        std::back_inserter(this->template getAttr<SliceAttr::Steps>()));
+                        std::back_inserter(this->steps()));
         }
 
         // Fill Steps attr if empty
-        if(this->template getAttr<SliceAttr::Steps>().empty()) {
+        if(this->steps().empty()) {
             // In case the input Steps is not provided, default value is 1
-            this->template getAttr<SliceAttr::Steps>() = std::vector<std::int64_t>(this->template getAttr<SliceAttr::Axes>().size(), 1);
+            this->steps() = std::vector<std::int64_t>(this->axes().size(), 1);
         }
 
         // Compute output dims
-        const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
+        const DimSize_t nbAxes = this->axes().size();
         std::vector<DimSize_t> outDims = getInput(0)->dims();
         for (std::size_t i = 0; i < nbAxes; ++i) {
-            const DimIdx_t axis = this->template getAttr<SliceAttr::Axes>()[i] >= 0 ?
-                            static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i]) :
-                            static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
-            const DimSize_t start = this->template getAttr<SliceAttr::Starts>()[i] >= 0 ?
-                                static_cast<DimSize_t>(this->template getAttr<SliceAttr::Starts>()[i]) :
-                                static_cast<DimSize_t>(this->template getAttr<SliceAttr::Starts>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
-            const DimSize_t end = this->template getAttr<SliceAttr::Ends>()[i] >= 0 ?
-                            static_cast<DimSize_t>(this->template getAttr<SliceAttr::Ends>()[i]) :
-                            static_cast<DimSize_t>(this->template getAttr<SliceAttr::Ends>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
-            const std::int64_t step = this->template getAttr<SliceAttr::Steps>()[i];
+            const DimIdx_t axis = this->axes()[i] >= 0 ?
+                            static_cast<DimIdx_t>(this->axes()[i]) :
+                            static_cast<DimIdx_t>(this->axes()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
+            const DimSize_t start = this->starts()[i] >= 0 ?
+                                static_cast<DimSize_t>(this->starts()[i]) :
+                                static_cast<DimSize_t>(this->starts()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const DimSize_t end = this->ends()[i] >= 0 ?
+                            static_cast<DimSize_t>(this->ends()[i]) :
+                            static_cast<DimSize_t>(this->ends()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const std::int64_t step = this->steps()[i];
 
-            AIDGE_ASSERT(step != 0, "Slice_Op: Step ({}) must have a non-zero value on axis {}!", this->template getAttr<SliceAttr::Steps>(), axis);
+            AIDGE_ASSERT(step != 0, "Slice_Op: Step ({}) must have a non-zero value on axis {}!", this->steps(), axis);
             if(step * (static_cast<int64_t>(end) - static_cast<int64_t>(start)) < 0) {
@@ -169,7 +169,7 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
         mOutputs[0]->resize(outDims);
         return true;
     }
-    
+
     return false;
 }
 
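
Each optional Slice input follows the same pattern (the input overrides the attribute, and the attribute must end up non-empty), after which the per-axis indices are normalized exactly as in the loop above. A standalone sketch of that normalization (not the Aidge API):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Negative axes wrap around the rank; negative starts/ends wrap around the
// size of the dimension they index.
struct Resolved { std::size_t axis, start, end; };

Resolved resolve(std::int64_t axis, std::int64_t start, std::int64_t end,
                 const std::vector<std::size_t>& inDims) {
    const auto rank = static_cast<std::int64_t>(inDims.size());
    const auto a = static_cast<std::size_t>(axis >= 0 ? axis : axis + rank);
    const auto dim = static_cast<std::int64_t>(inDims[a]);
    return {a,
            static_cast<std::size_t>(start >= 0 ? start : start + dim),
            static_cast<std::size_t>(end   >= 0 ? end   : end   + dim)};
}
```
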
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 9773c013ff062a1970f92033404f2d57d06f2ae7..69820a924105acc8bea817aecb90e0aa278fce06 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -25,17 +25,16 @@
 
 void Aidge::TransposeImpl::forward() {
     const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
-    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.getAttr<std::vector<DimSize_t>>(0));
+    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
 }
 
 const std::string Aidge::Transpose_Op::Type = "Transpose";
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
-        const auto& outDimsOrder = getAttr<std::vector<DimSize_t>>(0);
         std::vector<DimSize_t> outputDims;
-        for (std::size_t i = 0; i < outDimsOrder.size(); ++i) {
-            outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
+        for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
+            outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
         }
         mOutputs[0]->resize(outputDims);
         return true;
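
`outputDimsOrder()` is a permutation applied to the input shape; the forward pass delegates the data movement to `copyTranspose`. A standalone sketch of the shape computation (not the Aidge API):

```cpp
#include <cstddef>
#include <vector>

std::vector<std::size_t> transposedDims(const std::vector<std::size_t>& inDims,
                                        const std::vector<std::size_t>& order) {
    std::vector<std::size_t> out;
    out.reserve(order.size());
    for (std::size_t i : order)
        out.push_back(inDims[i]);   // output dim k comes from input dim order[k]
    return out;
}
// e.g. transposedDims({2, 3, 4}, {2, 0, 1}) == {4, 2, 3}
```
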
diff --git a/src/recipes/ConstantFolding.cpp b/src/recipes/ConstantFolding.cpp
index 42fb45224614ca2655165a69b974cfe229e27f90..40b0bda766ab243805349b13e93391c5a60df63a 100644
--- a/src/recipes/ConstantFolding.cpp
+++ b/src/recipes/ConstantFolding.cpp
@@ -44,7 +44,7 @@ void Aidge::constantFolding(std::shared_ptr<GraphView> graph) {
                     }
 
                     const auto& producer = std::static_pointer_cast<Producer_Op>(input.first->getOperator());
-                    if (!producer->getAttr<bool>("Constant")) {
+                    if (!producer->constant()) {
                         Log::info("Node {} (of type {}) not foldable because Producer input {} not Constant",
                             node->name(), node->type(), input.first->name());
                         foldable = false;
diff --git a/src/recipes/ExplicitCastMove.cpp b/src/recipes/ExplicitCastMove.cpp
index 7d836c3acc835c5ed3fe014db6787029dc318afd..c860b9e8a0e1fcbf467eb13e1366f371d731a47d 100644
--- a/src/recipes/ExplicitCastMove.cpp
+++ b/src/recipes/ExplicitCastMove.cpp
@@ -73,7 +73,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
 
         IOIndex_t inputIdx = 0;
         for (auto parent : node->inputs()) {
-            // TODO: possible optimization: currently, a Cast/Move Operator may 
+            // TODO: possible optimization: currently, a Cast/Move Operator may
-            // be added several time to the same output, if it has multiple childs,
+            // be added several times to the same output, if it has multiple children,
             // even if it is the same conversion each time.
             if (parent.first != nullptr) {
@@ -91,8 +91,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
 
                 if (node->type() != Cast_Op::Type && input->dataType() != output->dataType()) {
-                    // Change of date type => a Cast operator is required
+                    // Change of data type => a Cast operator is required
-                    castOp = Cast();
-                    castOp->getOperator()->setDataType(output->dataType());
+                    castOp = Cast(output->dataType());
                     castOp->getOperator()->setBackend(device.first, device.second);
 
                     if (moveOp == nullptr) {
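
The one-argument `Cast(output->dataType())` factory replaces the construct-then-set pattern, which suggests the target type is now an attribute fixed at construction. A standalone sketch of constructor-injected attributes (illustrative only, not the Aidge API):

```cpp
#include <memory>

// With attributes behind a shared pointer, passing the target type to the
// constructor stores it in mAttributes up front instead of patching the
// operator afterwards.
enum class DataType { Float32, Int32 };
struct CastAttrs { DataType target; };

struct CastOp {
    std::shared_ptr<CastAttrs> mAttributes;
    explicit CastOp(DataType target)
        : mAttributes(std::make_shared<CastAttrs>(CastAttrs{target})) {}
    DataType targetType() const { return mAttributes->target; }
};
```
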
diff --git a/src/recipes/ExplicitTranspose.cpp b/src/recipes/ExplicitTranspose.cpp
index a12e76e9ddb701a370bfd29b70ad0775eee55962..7ff971b7e436219d5dfbb7cbadbaf780d3f1aeda 100644
--- a/src/recipes/ExplicitTranspose.cpp
+++ b/src/recipes/ExplicitTranspose.cpp
@@ -57,7 +57,7 @@ void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {
 
         IOIndex_t inputIdx = 0;
         for (auto parent : node->inputs()) {
-            // TODO: possible optimization: currently, a Transpose Operator may 
+            // TODO: possible optimization: currently, a Transpose Operator may
-            // be added several time to the same output, if it has multiple childs,
+            // be added several times to the same output, if it has multiple children,
             // even if it is the same conversion each time.
             if (parent.first != nullptr) {
@@ -97,7 +97,7 @@ void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {
                             const auto transpose = getDataFormatTranspose(parentInput->dataFormat(), output->dataFormat());
                             auto transposeOp = std::static_pointer_cast<Transpose_Op>(parent.first->getOperator());
                             transposeOp->setDataFormat(output->dataFormat());
-                            transposeOp->getAttr<std::vector<DimSize_t>>(0) = std::vector<DimSize_t>(transpose.begin(), transpose.end());
+                            transposeOp->outputDimsOrder() = std::vector<DimSize_t>(transpose.begin(), transpose.end());
                         }
                     }
                     else {
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 7c8c9c2ba2119e0dc708ef4b788690eb223ea0b3..aa20a056ad789975c5b4d493a1ce48dcd7592946 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -62,13 +62,13 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->outChannels();
         channelsSize = convOpPtr->inChannels();
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        kernelDims = convOpPtr->kernelDims();
     }
     else if (convNode->type() == ConvDepthWise_Op<2>::Type) {
         const std::shared_ptr<ConvDepthWise_Op<2>> convOpPtr =
             std::static_pointer_cast<ConvDepthWise_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->nbChannels();
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        kernelDims = convOpPtr->kernelDims();
     }
     AIDGE_ASSERT(kernelDims.size() == 2, "fuseBatchNorm(): only 2D convolutions are supported");
 
@@ -78,7 +78,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     const Tensor& b_mean = batchOp->getInput(3)->refCastFrom(b_meanBuf, DataType::Float32, "cpu");
     const Tensor& b_var = batchOp->getInput(4)->refCastFrom(b_varBuf, DataType::Float32, "cpu");
 
-    const float epsilon = batchOp->getAttr<float>("Epsilon");
+    const float epsilon = batchOp->epsilon();
 
 
     assert(epsilon > 0.0);
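
The arithmetic of the fold itself lies outside this hunk; for context, standard batch-norm folding absorbs the BN affine transform into the convolution per output channel. A standalone sketch of that standard computation (not necessarily the exact code in FuseBatchNorm.cpp):

```cpp
#include <cmath>

// Standard batch-norm folding, per output channel c:
//   scale_c = gamma_c / sqrt(var_c + epsilon)
//   w'_c    = w_c * scale_c
//   b'_c    = beta_c + (b_c - mean_c) * scale_c
struct Folded { float weightScale; float bias; };

Folded foldChannel(float gamma, float beta, float mean, float var,
                   float epsilon, float convBias) {
    const float scale = gamma / std::sqrt(var + epsilon); // epsilon > 0 keeps this finite
    return {scale, beta + (convBias - mean) * scale};
}
```
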
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index b6cd0498165835c2c308b64fb1ea9ac188fb2154..88691c26d5d7013874c13000535ec2a3842d47d3 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -94,6 +94,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
 
         auto slice = Slice();
         auto backend = outTensor->getImpl()->backend();
+
         // Create Slice's Starts producer node
         std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
         for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
diff --git a/src/recipes/LabelGraph.cpp b/src/recipes/LabelGraph.cpp
index ac0e6bfe197460c8c422a6c1f3b3240518ee1f29..75bcd36bf61f7c23645038bedb060cd13bdce2c5 100644
--- a/src/recipes/LabelGraph.cpp
+++ b/src/recipes/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvAttr::KernelDims>(), op->template getAttr<ConvAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvDepthWiseAttr::KernelDims>(), op->template getAttr<ConvDepthWiseAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<AvgPoolingAttr::KernelDims>(), op->template getAttr<AvgPoolingAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
diff --git a/src/utils/Attributes.cpp b/src/utils/Attributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e79db53a60a955e3502e070cda5818d3d7b6c922
--- /dev/null
+++ b/src/utils/Attributes.cpp
@@ -0,0 +1,100 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/utils/Attributes.hpp"
+
+#include <cctype>  // std::isdigit, std::islower, std::isupper, std::tolower,
+                   // std::toupper
+#include <string>
+
+std::string Aidge::Attributes::snakeToPascal(const std::string& snakeCase) {
+    std::string result;
+    bool to_upper = true; // Start with uppercase for PascalCase
+
+    for (char ch : snakeCase) {
+        if (ch == '_') {
+            to_upper = true; // Next character should be uppercase
+        } else {
+            if (to_upper) {
+                result += std::toupper(ch);
+                to_upper = false; // Reset flag after making a character uppercase
+            } else {
+                result += ch;
+            }
+        }
+    }
+    return result;
+}
+
+std::string Aidge::Attributes::pascalToSnake(const std::string& pascalCase) {
+    std::string result;
+
+    for (char ch : pascalCase) {
+        if (std::isupper(ch)) {
+            if (!result.empty()) {
+                result += '_';
+            }
+            result += std::tolower(ch);
+        } else {
+            result += ch;
+        }
+    }
+    return result;
+}
+
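+// PascalCase here: a leading uppercase letter, no underscores, and no two
+// consecutive uppercase letters (uppercase is only accepted after a lowercase
+// letter or digit).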
+bool Aidge::Attributes::isPascalCase(const std::string& str) {
+    if (str.empty() || !std::isupper(str[0])) {
+        return false;
+    }
+
+    bool expectUpper = false;
+    for (size_t i = 1; i < str.size(); ++i) {
+        if (str[i] == '_') {
+            return false;
+        }
+        if (std::isupper(str[i])) {
+            if (!expectUpper) {
+                return false;
+            }
+            expectUpper = false;
+        } else if (std::islower(str[i]) || std::isdigit(str[i])) {
+            expectUpper = true;
+        } else {
+            return false;
+        }
+    }
+    return true;
+}
+
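+// snake_case here: lowercase letters and digits only, with single (never
+// doubled) underscores.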
+bool Aidge::Attributes::isSnakeCase(const std::string& str) {
+    if (str.empty()) {
+        return false;
+    }
+
+    bool lastCharWasUnderscore = false;
+    for (char ch : str) {
+        if (ch == '_') {
+            if (lastCharWasUnderscore) {
+                return false;
+            }
+            lastCharWasUnderscore = true;
+        } else if (!std::islower(ch) && !std::isdigit(ch)) {
+            return false;
+        } else {
+            lastCharWasUnderscore = false;
+        }
+    }
+    return true;
+}
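
A minimal usage sketch for these helpers, assuming they are static members declared in aidge/utils/Attributes.hpp (their free-standing definitions above suggest so):

```cpp
#include <cassert>

#include "aidge/utils/Attributes.hpp"

int main() {
    assert(Aidge::Attributes::snakeToPascal("kernel_dims") == "KernelDims");
    assert(Aidge::Attributes::pascalToSnake("KernelDims") == "kernel_dims");
    assert(Aidge::Attributes::isSnakeCase("stride_dims"));
    assert(Aidge::Attributes::isPascalCase("StrideDims"));
    assert(!Aidge::Attributes::isPascalCase("ABCDims")); // consecutive capitals rejected
    return 0;
}
```
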
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index 903eefc0c7e7a34170d60dc136e792b8687e96e3..6abb4d37114d0952feb13c6cfbee66bd65dc5748 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -323,7 +323,7 @@ TEST_CASE("[core/graph] Matching") {
         gm.addNodeLambda("3x3", [](const NodePtr& node) {
             const std::shared_ptr<Conv_Op<2>> op =
                 std::static_pointer_cast<Conv_Op<2>>(node->getOperator());
-            return (op->getAttr<std::array<DimSize_t, 2>>("KernelDims") == std::array<DimSize_t, 2>({3, 3}));
+            return (op->kernelDims() == std::array<DimSize_t, 2>({3, 3}));
         });
 
         const auto results = gm.match("Pad->Conv[3x3]->ReLU");
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 8d634cc3a105c423b54b6003f41204aeb1fc5335..41bad69749fd82f892c6faa625739d0493396c73 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -20,7 +20,7 @@ using namespace Aidge;
 TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        const char* key = "intAttr";
+        const char* key = "IntAttr";
         Testop.addAttr(key, int(5));
         int registeredVal = Testop.getAttr<int>(key);
         REQUIRE(registeredVal == 5);
@@ -28,21 +28,21 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("LONG") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         long value = 3;
-        const char* key = "longAttr";
+        const char* key = "LongAttr";
         Testop.addAttr(key, value);
         REQUIRE(Testop.getAttr<long>(key) == value);
     }
     SECTION("FLOAT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         float value = 2.0;
-        const char* key = "floatAttr";
+        const char* key = "FloatAttr";
         Testop.addAttr(key, value);
         REQUIRE(Testop.getAttr<float>(key) == value);
     }
      SECTION("VECTOR<BOOL>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<bool> value = {true, false, false, true, true};
-        const char* key = "vect";
+        const char* key = "Vect";
         Testop.addAttr(key, value);
 
         REQUIRE(Testop.getAttr<std::vector<bool>>(key).size() == value.size());
@@ -53,7 +53,7 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("VECTOR<INT>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<int> value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
-        const char* key = "vect";
+        const char* key = "Vect";
         Testop.addAttr(key, value);
 
         REQUIRE(Testop.getAttr<std::vector<int>>(key).size() == value.size());
@@ -66,23 +66,23 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
-        Goal : Test that the offsets are well done by adding different attributes with different size.
+        Goal: check that attribute offsets are handled correctly by adding attributes of different sizes.
         */
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addAttr<long>("longAttr", 3);
-        Testop.addAttr<float>("floatAttr", 2.0);
-        Testop.addAttr<uint8_t>("uint8Attr", 5);
-        Testop.addAttr<long long>("llAttr", 10);
-        REQUIRE(Testop.getAttr<long>("longAttr") == 3);
-        REQUIRE(Testop.getAttr<float>("floatAttr") == 2.0);
-        REQUIRE(Testop.getAttr<uint8_t>("uint8Attr") == 5);
-        REQUIRE(Testop.getAttr<long long>("llAttr") == 10);
+        Testop.addAttr<long>("LongAttr", 3);
+        Testop.addAttr<float>("FloatAttr", 2.0);
+        Testop.addAttr<uint8_t>("Uint8Attr", 5);
+        Testop.addAttr<long long>("LlAttr", 10);
+        REQUIRE(Testop.getAttr<long>("LongAttr") == 3);
+        REQUIRE(Testop.getAttr<float>("FloatAttr") == 2.0);
+        REQUIRE(Testop.getAttr<uint8_t>("Uint8Attr") == 5);
+        REQUIRE(Testop.getAttr<long long>("LlAttr") == 10);
     }
 }
 
 TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
     SECTION("WRONG TYPE FOR GETTER") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addAttr<long>("longAttr", 3);
+        Testop.addAttr<long>("LongAttr", 3);
 
-        // This line should raise a failled assert
+        // This line should raise a failed assert
-        REQUIRE_THROWS(Testop.getAttr<int>("longAttribute"));
+        REQUIRE_THROWS(Testop.getAttr<int>("LongAttribute"));
     }
 }
diff --git a/version.txt b/version.txt
index 0c62199f16ac1e2d7f7ae75b420c1231325dff4e..ee1372d33a29e27945406f0527f8af8e6ee119c9 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.2.1
+0.2.2