From 0354153454d197c09faba04568c7e0835a422794 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Wed, 5 Feb 2025 11:50:41 +0100
Subject: [PATCH 01/31] Fix issue eclipse/aidge/aidge#243

---
 aidge_core/static_analysis.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/aidge_core/static_analysis.py b/aidge_core/static_analysis.py
index c65a102a1..b4a82a4fb 100644
--- a/aidge_core/static_analysis.py
+++ b/aidge_core/static_analysis.py
@@ -136,7 +136,8 @@ class StaticAnalysisExt(aidge_core.StaticAnalysis):
                 bot += serie
         else:
             plt.bar(names_only, values)
-        ax.yaxis.minorticks_on()
+        if callable(getattr(ax.yaxis, 'minorticks_on', None)):
+            ax.yaxis.minorticks_on() # introduced in matplotlib 3.9.x
         plt.grid(axis='y', which='major', linestyle='--', color='gray')
         plt.grid(axis='y', which='minor', linestyle=':', color='lightgray')
         formatter0 = matplotlib.ticker.EngFormatter(unit='')
@@ -171,7 +172,8 @@ class StaticAnalysisExt(aidge_core.StaticAnalysis):
                 left += serie
         else:
             plt.barh(names_only, values)
-        ax.xaxis.minorticks_on()
+        if callable(getattr(ax.xaxis, 'minorticks_on', None)):
+            ax.xaxis.minorticks_on() # introduced in matplotlib 3.9.x
         plt.grid(axis='x', which='major', linestyle='--', color='gray')
         plt.grid(axis='x', which='minor', linestyle=':', color='lightgray')
         formatter0 = matplotlib.ticker.EngFormatter(unit='')
-- 
GitLab


From 9141f35d0e47d06d7ccf6393d1a42d4cfbff44f9 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Wed, 5 Feb 2025 12:01:33 +0100
Subject: [PATCH 02/31] Fix bug #231

---
 src/graph/GraphView.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index e1a520865..fab9be915 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -266,7 +266,12 @@ void Aidge::GraphView::logOutputs(const std::string& dirName) const {
         AIDGE_THROW_OR_ABORT(std::runtime_error,
             "Could not create graph view log file: {}", inputPath);
       }
-      fmt::print(fp.get(), "{}\n", nodePtr->getOperator()->getRawOutput(outIdx)->toString().c_str());
+
+      auto oTensor = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator())->getOutput(outIdx);
+      std::shared_ptr<Tensor> fallback;
+      const Tensor& localTensor = oTensor->refFrom(fallback, "cpu");
+
+      fmt::print(fp.get(), "{}\n", localTensor.toString().c_str());
     }
   }
 }
-- 
GitLab


From c50a7d1faa3a8a74e8d6e40dac5a3f2587b5235a Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 6 Feb 2025 12:23:37 +0100
Subject: [PATCH 03/31] Fixed attributes not properly cloned in GenericOperator
 copy constructor

---
 src/operator/GenericOperator.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index 1e28cf289..e0f7cf34a 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -45,7 +45,7 @@ Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
 Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& op)
     : OperatorTensor(op),
         mForwardDims(op.mForwardDims),
-        mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
+        mAttributes(std::make_shared<DynamicAttributes>(*op.mAttributes))
 {
     mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
 }
-- 
GitLab


From 62de1a25c9e4d69167f300982f81b66f8a211ed4 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 6 Feb 2025 12:24:11 +0100
Subject: [PATCH 04/31] Coding style

---
 include/aidge/operator/Operator.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 40899ffa7..dd59af175 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -118,12 +118,12 @@ public:
      */
     Operator(const Operator& op):
         std::enable_shared_from_this<Operator>(),
+        mType(op.mType),
         mOperatorType(op.mOperatorType),
         mInputsCategory(op.mInputsCategory),
         mNbOut(op.mNbOut),
         mBackEdges(op.mBackEdges)
     {
-        mType = op.mType;
         mImpl = nullptr;
         // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
         // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
-- 
GitLab


From 2463ebc95e4319e79d2176367eed41e645d80542 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 6 Feb 2025 12:24:51 +0100
Subject: [PATCH 05/31] Fixed incorrect MetaOperator copy constructor and
 clone() method

---
 include/aidge/operator/MetaOperator.hpp |  5 +----
 src/operator/MetaOperator.cpp           | 22 +++++++++++++++++++++-
 2 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index f7f1cdfd5..c6ab45290 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -69,10 +69,7 @@ public:
      * 
      * @param op The operator to copy.
      */
-    MetaOperator_Op(const MetaOperator_Op& op)
-        : OperatorTensor(op),
-          mGraph(op.mGraph->clone()) // Clone the micro-graph for isolation
-    {}
+    MetaOperator_Op(const MetaOperator_Op& op);
 
     /**
      * @brief Set the node for scheduling.
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index ae3c3ed6c..9a8a943fc 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -54,8 +54,28 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
     }
 }
 
+Aidge::MetaOperator_Op::MetaOperator_Op(const MetaOperator_Op& op)
+    : OperatorTensor(op),
+        mGraph(op.mGraph->clone()), // Clone the micro-graph for isolation
+        mAttributes(std::make_shared<DynamicAttributes>(*op.mAttributes)) // Clone attributes
+{
+    // Associate outputs to micro-graph outputs for custom implementation
+    for (size_t outputIdx = 0; outputIdx < mOutputs.size(); ++outputIdx) {
+        const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
+        if (outputOp.first) {
+            mOutputs[outputIdx] = std::dynamic_pointer_cast<Tensor>(outputOp.first->getOperator()->getRawOutput(outputOp.second));
+        }
+    }
+
+    // Attributes are already cloned.
+}
+
 std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const {
-    return std::make_shared<MetaOperator_Op>(type(), mGraph->clone());
+    auto metaOp = std::make_shared<MetaOperator_Op>(*this);
+    if (mImpl) {
+        metaOp->setBackend(mImpl->backend());
+    }
+    return metaOp;
 }
 
 void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
-- 
GitLab


From 86a757769d8b31ad9495b384988914c04d98e29a Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 6 Feb 2025 12:36:17 +0100
Subject: [PATCH 06/31] Added doc comment

---
 src/operator/MetaOperator.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 9a8a943fc..96c5b219a 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -73,6 +73,9 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const MetaOperator_Op& op)
 std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const {
     auto metaOp = std::make_shared<MetaOperator_Op>(*this);
     if (mImpl) {
+        // Only call setBackend() if mImpl is not nullptr.
+        // The inner-graph backend is already set in the MetaOperator_Op copy
+        // constructor, when the graph is cloned.
         metaOp->setBackend(mImpl->backend());
     }
     return metaOp;
-- 
GitLab


From 6c6b919954d3495bfca199e406305138e19a777d Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 6 Feb 2025 13:55:47 +0100
Subject: [PATCH 07/31] Removed mandatory type attribute for Meta op, which is
 redundant with Meta op impl registry

---
 src/backend/OperatorImpl.cpp | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 71f4f04b2..08f5fe671 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -74,13 +74,6 @@ Aidge::ImplSpec Aidge::OperatorImpl::getRequiredSpec() const {
 
         requiredSpec.outputs.push_back({opTensor.getOutput(i)->dataType(), opTensor.getOutput(i)->dataFormat(), dims});
     }
-    // Attributes
-    if (!mOp.isAtomic()) {
-        requiredSpec.attrs.setAttr("type:!", mOp.type()); // :! mandatory qualifier
-    }
-    else {
-        requiredSpec.attrs.setAttr("type", mOp.type());
-    }
 
     const auto& inhAttrs = mOp.inheritedAttributes();
     if (inhAttrs) {
-- 
GitLab


From 6972035408ab6e30ef5761ae4226a7f156cde981 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 6 Feb 2025 15:06:45 +0100
Subject: [PATCH 08/31] Removed code redundancy

---
 include/aidge/operator/MetaOperatorDefs.hpp   |  21 ++--
 .../operator/pybind_MetaOperatorDefs.cpp      |   3 +-
 src/operator/MetaOperatorDefs/LSTM.cpp        | 109 +++---------------
 3 files changed, 27 insertions(+), 106 deletions(-)

diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 5bb184b80..9597b533c 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -260,6 +260,17 @@ inline std::shared_ptr<Node> PaddedMaxPooling(
     return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
 }
 
+/**
+ * @brief Creates an LSTM (Long Short-Term Memory) operation as a MetaOperator.
+ *
+ * This function creates an LSTM operation as a MetaOperator for use in graph-based computation.
+ *
+ * @param[in] seq_length The length of the input sequence.
+ * @return A shared pointer to the MetaOperator_Op representing the LSTM operation.
+ */
+std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length,
+                                         const std::string &name = "");
+
 /**
  * @brief Creates an LSTM (Long Short-Term Memory) operator.
  *
@@ -278,16 +289,6 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
                            bool noBias = false,
                            const std::string &name = "");
 
-/**
- * @brief Creates an LSTM (Long Short-Term Memory) operation as a MetaOperator.
- *
- * This function creates an LSTM operation as a MetaOperator for use in graph-based computation.
- *
- * @param[in] seq_length The length of the input sequence.
- * @return A shared pointer to the MetaOperator_Op representing the LSTM operation.
- */
-std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
-
 std::shared_ptr<MetaOperator_Op> LeakyOp();
 std::shared_ptr<Node> Leaky(const int nbTimeSteps,
                             const float beta,
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index b2811fbaa..35f3d2134 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -176,7 +176,8 @@ void declare_LSTMOp(py::module &m) {
        py::arg("nobias") = false,
        py::arg("name") = "");
   m.def("LSTMOp", &LSTM_Op,
-       py::arg("seq_length"));
+       py::arg("seq_length"),
+       py::arg("name") = "");
 }
 
 void declare_LeakyOp(py::module &m) {
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index 22c0469b3..c7fbe8a16 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -23,11 +23,8 @@
 
 namespace Aidge {
 
-std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
-                           const DimSize_t hiddenChannel,
-                           const DimSize_t seqLength,
-                           bool noBias,
-                           const std::string& name)
+std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
+                                         const std::string& name)
 {
     // Construct micro-graph
     auto input = Identity((!name.empty()) ? name + "_input" : "");
@@ -113,7 +110,18 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
         {hiddenState, 1}, {cellState, 1}});
     microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
 
-    auto metaOp = MetaOperator("LSTM", microGraph, {}, name);
+    return std::make_shared<MetaOperator_Op>("LSTM", microGraph);
+}
+
+std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
+                           const DimSize_t hiddenChannel,
+                           const DimSize_t seqLength,
+                           bool noBias,
+                           const std::string& name)
+{
+    auto op = LSTM_Op(seqLength, name);
+    auto metaOp = std::make_shared<Node>(op, name);
+    op->setUpperNode(metaOp);
     addProducer(metaOp, 1, {hiddenChannel, inChannel}, "wi");
     addProducer(metaOp, 2, {hiddenChannel, inChannel}, "wo");
     addProducer(metaOp, 3, {hiddenChannel, inChannel}, "wf");
@@ -135,93 +143,4 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     return metaOp;
 }
 
-std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
-{
-    // Construct micro-graph
-    auto input = Identity("");
-    auto hiddenState = Memorize(seqLength, "");
-    auto cellState = Memorize(seqLength, "");
-    auto add = Add("");
-
-    // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
-    input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
-    hiddenState->addChild(forgetGateH, 1, 0);
-    auto forgetGate = Add("");
-    forgetGateX->addChild(forgetGate, 0, 0);
-    forgetGateH->addChild(forgetGate, 0, 1);
-    auto forgetGateAct = Sigmoid("");
-    auto forgetGateMul = Mul("");
-    forgetGate->addChild(forgetGateAct, 0, 0);
-    forgetGateAct->addChild(forgetGateMul, 0, 0);
-    forgetGateMul->addChild(add, 0, 0);
-    cellState->addChild(forgetGateMul, 1, 1);
-
-    // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
-    input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
-    hiddenState->addChild(inputGateH, 1, 0);
-    auto inputGate = Add("");
-    inputGateX->addChild(inputGate, 0, 0);
-    inputGateH->addChild(inputGate, 0, 1);
-    auto inputGateAct = Sigmoid("");
-    auto inputGateMul = Mul("");
-    inputGate->addChild(inputGateAct, 0, 0);
-    inputGateAct->addChild(inputGateMul, 0, 0);
-    inputGateMul->addChild(add, 0, 1);
-
-    // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
-    input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
-    hiddenState->addChild(cellCandidateH, 1, 0);
-    auto cellCandidate = Add("");
-    cellCandidateX->addChild(cellCandidate, 0, 0);
-    cellCandidateH->addChild(cellCandidate, 0, 1);
-    auto cellCandidateAct = Tanh("");
-    cellCandidate->addChild(cellCandidateAct, 0, 0);
-    cellCandidateAct->addChild(inputGateMul, 0, 1);
-
-    // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
-    input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
-    hiddenState->addChild(outputGateH, 1, 0);
-    auto outputGate = Add("");
-    outputGateX->addChild(outputGate, 0, 0);
-    outputGateH->addChild(outputGate, 0, 1);
-    auto outputGateAct = Sigmoid("");
-    auto outputGateMul = Mul("");
-    outputGate->addChild(outputGateAct, 0, 0);
-    outputGateAct->addChild(outputGateMul, 0, 0);
-
-    // Updated cell state to help determine new hidden state
-    auto cellUpdatedAct = Tanh("");
-    add->addChild(cellUpdatedAct, 0, 0);
-    cellUpdatedAct->addChild(outputGateMul, 0, 1);
-    outputGateMul->addChild(hiddenState, 0, 0);
-    add->addChild(cellState, 0, 0);
-
-    std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
-    microGraph->add(input);
-    microGraph->add({hiddenState, cellState, add,
-        forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
-        inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
-        cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
-        outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
-        cellUpdatedAct}, false);
-
-    microGraph->setOrderedInputs({{input, 0},
-        {inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
-        {inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
-        {inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
-        {inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
-        {hiddenState, 1}, {cellState, 1}});
-    microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
-
-    return std::make_shared<MetaOperator_Op>("LSTM", microGraph);
-}
-
 } // namespace Aidge
-- 
GitLab


From 9b70101bbe90478f0052e105c084eb9d01b0cb6e Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Mon, 10 Feb 2025 14:09:14 +0000
Subject: [PATCH 09/31] Fix
 https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/issues/228

---
 src/utils/Log.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index b4c64d527..9755aa61d 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -24,6 +24,8 @@ namespace Aidge {
  * @brief Initialize console log level from environment. If compile mode is
  * DEBUG, then the default level is Log::Level::Debug, else it is
  * Log::Level::Notice.
+ *
+ * WARNING: Do not use this variable directly, use getConsoleLevel() instead.
  */
 Log::Level Log::mConsoleLevel = []() {
 #ifndef NDEBUG
@@ -58,7 +60,7 @@ bool Log::mConsoleColor = []() {
  */
 Log::Level Log::mFileLevel = []() {
 #ifndef NDEBUG
-    constexpr Level defaultLevel = Level::Debug;
+    constexpr Log::Level defaultLevel = Level::Debug;
 #else
     constexpr Log::Level defaultLevel = Level::Notice;
 #endif
@@ -164,7 +166,7 @@ void Log::log(Level level, const std::string& msg) {
     const std::size_t levelIndentSizes[6] = {10, 9, 11, 12, 10, 10};
     const std::size_t width = 80 - levelIndentSizes[static_cast<std::size_t>(level)];
 
-    if (level >= mConsoleLevel) {
+    if (level >= getConsoleLevel()) {
         for (const auto& context : mContext) {
             fmt::println("Context: {}", context);
         }
-- 
GitLab


From f547d4e9cc27d4615f96ba5255a91f165d7932ef Mon Sep 17 00:00:00 2001
From: Jerome Hue <jerome.hue@cea.fr>
Date: Thu, 6 Feb 2025 12:26:33 +0100
Subject: [PATCH 10/31] Rename FMT_VERSION to FMT_MIN_VERSION in config.cmake
 template

---
 aidge_core-config.cmake.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aidge_core-config.cmake.in b/aidge_core-config.cmake.in
index d69d24675..f41b4327c 100644
--- a/aidge_core-config.cmake.in
+++ b/aidge_core-config.cmake.in
@@ -1,7 +1,7 @@
 @PACKAGE_INIT@
 
 include(CMakeFindDependencyMacro)
-find_dependency(fmt @FMT_VERSION@)
+find_dependency(fmt @FMT_MIN_VERSION@)
 find_dependency(Threads)
 set(AIDGE_REQUIRES_PYTHON @AIDGE_REQUIRES_PYTHON@)
 set(AIDGE_PYTHON_HAS_EMBED @AIDGE_PYTHON_HAS_EMBED@)
-- 
GitLab


From a547ec1cce31f3b9f71ba4d4f826f3df81670724 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 12:53:17 +0000
Subject: [PATCH 11/31] [Fix] Add default arg axis=0 for concat

---
 include/aidge/operator/Concat.hpp         |  4 ++--
 python_binding/operator/pybind_Concat.cpp | 24 +++++++++++------------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 83914b673..ad31ef1a3 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -56,7 +56,7 @@ enum class ConcatAttr {
      *
      * The specified axis determines the direction of concatenating.
      */
-    Axis 
+    Axis
 };
 
 /**
@@ -107,7 +107,7 @@ public:
      * @param[in] nbIn Number of input tensors.
      * @param[in] axis Axis along which concatenation is performed.
      */
-    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis);
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis = 0);
 
     /**
      * @brief Copy-constructor. Copies the operator attributes and its output tensors,
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 9e1b3de9e..d2410b03a 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -24,30 +24,30 @@ void init_Concat(py::module& m) {
         R"mydelimiter(
         Initialize a Concat operator.
 
-        :param nb_inputs : The number of input tensors to concatenate.
-        :type nb_inputs : :py:class:`int`
-        :param axis : The axis along which to concatenate the tensors.
-        :type axis : :py:class:`int`
+        :param nb_inputs: The number of input tensors to concatenate.
+        :type nb_inputs: :py:class:`int`
+        :param axis: The axis along which to concatenate the tensors, default=0.
+        :type axis: :py:class:`int`
         )mydelimiter")
         .def(py::init<const IOIndex_t, const int>(),
              py::arg("nb_inputs"),
-             py::arg("axis"))
+             py::arg("axis") = 0)
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
         .def_static("get_outputs_name", &Concat_Op::getOutputsName)
         .def_readonly_static("Type", &Concat_Op::Type);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
-    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis"), py::arg("name") = "",
+    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis") = 0, py::arg("name") = "",
         R"mydelimiter(
         Initialize a node containing a Concat operator.
 
-        :param nb_inputs : The number of input tensors to concatenate.
-        :type nb_inputs : :py:class:`int`
-        :param axis : The axis along which to concatenate the tensors.
-        :type axis : :py:class:`int`
-        :param name : Name of the node.
-        :type name : :py:class:`str`
+        :param nb_inputs: The number of input tensors to concatenate.
+        :type nb_inputs: :py:class:`int`
+        :param axis: The axis along which to concatenate the tensors.
+        :type axis: :py:class:`int`
+        :param name: Name of the node.
+        :type name: :py:class:`str`
         )mydelimiter");
 }
 
-- 
GitLab


From 759c001fab5e937faaab78c0fb98f0a50f94436b Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 12:55:07 +0000
Subject: [PATCH 12/31] [Fix] Make Unsqueeze registrable

---
 python_binding/operator/pybind_Unsqueeze.cpp | 29 ++++++++++----------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index b61cb40ce..7ef8af8b6 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -23,26 +23,25 @@ void init_Unsqueeze(py::module &m) {
   py::class_<Unsqueeze_Op, std::shared_ptr<Unsqueeze_Op>, OperatorTensor>(
       m, "UnsqueezeOp", py::multiple_inheritance(),
       R"mydelimiter(
-		Initialize an unsqueeze operator.
-		:param axes :   axes to unsqueeze between [-r;r-1] 
-						with r = input_tensor.nbDims() + len(axes)
-		:type axes : :py:class: List[Int]
+            Initialize an unsqueeze operator.
+            :param axes:   axes to unsqueeze between [-r;r-1] with r = input_tensor.nbDims() + len(axes)
+            :type axes: :py:class: List[Int]
 		)mydelimiter")
       // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
       .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
-      .def("axes", &Unsqueeze_Op::axes);
-  // Here we bind the constructor of the Unsqueeze Node. We add an argument for
-  // each attribute of the operator (in here we only have 'axes') and the last
-  // argument is the node's name.
+      .def_readonly_static("Type", &Unsqueeze_Op::Type)
+      ;
+
+  declare_registrable<Unsqueeze_Op>(m, "UnsqueezeOp");
+
   m.def("Unsqueeze", &Unsqueeze, py::arg("axes") = std::vector<int8_t>({}),
         py::arg("name") = "",
         R"mydelimiter(
-    Initialize a node containing an unsqueeze operator.
-	:param axes :   axes to unsqueeze between [-r;r-1] 
-					with r = input_tensor.nbDims() + len(axes)
-	:type axes : :py:class: List[Int]
-    :param name : name of the node.
-)mydelimiter");
-}
+            Initialize a node containing an unsqueeze operator.
+            :param axes:   axes to unsqueeze between [-r;r-1] with r = input_tensor.nbDims() + len(axes)
+            :type axes: :py:class: List[Int]
+            :param name: name of the node.
+        )mydelimiter");
+    }
 } // namespace Aidge
-- 
GitLab


From 20f20a9e47203668a5f86ac637dcd3ab4b1209b2 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 13:00:44 +0000
Subject: [PATCH 13/31] [Fix] Make Squeeze registrable + fix python doc.

---
 python_binding/operator/pybind_Squeeze.cpp | 44 +++++++++++-----------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index ca90fb46a..188ce745d 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -24,29 +24,29 @@ namespace Aidge {
 
 void init_Squeeze(py::module &m) {
   py::class_<Squeeze_Op, std::shared_ptr<Squeeze_Op>, OperatorTensor>(
-      m, "SqueezeOp", py::multiple_inheritance(),
-		R"mydelimiter(
-		Initialize squeeze operator
-		:param axes :   axes to squeeze between [-r;r-1] 
-						with r = input_tensor.nbDims()
-						& r in [-128 , 127]
-		:type axes : :py:class: List[Int]
-		)mydelimiter")
-      .def("get_inputs_name", &Squeeze_Op::getInputsName)
-      .def("get_outputs_name", &Squeeze_Op::getOutputsName)
-      .def("axes", &Squeeze_Op::axes);
-  // Here we bind the constructor of the Squeeze Node. We add an argument
-  // for each attribute of the operator (in here we only have 'axes') and
-  // the last argument is the node's name.
-  m.def("Squeeze", &Squeeze, py::arg("axes") = std::vector<int8_t>({}),
+    m, "SqueezeOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize squeeze operator
+    :param axes:   axes to squeeze between [-r;r-1]
+    				with r = input_tensor.nbDims()
+    				& r in [-128 , 127]
+    :type axes: :py:class: List[Int]
+    )mydelimiter")
+    .def("get_inputs_name", &Squeeze_Op::getInputsName)
+    .def("get_outputs_name", &Squeeze_Op::getOutputsName)
+    .def("axes", &Squeeze_Op::axes);
+
+    declare_registrable<Squeeze_Op>(m, "SqueezeOp");
+    m.def("Squeeze", &Squeeze, py::arg("axes") = std::vector<int8_t>({}),
         py::arg("name") = "",
         R"mydelimiter(
-    Initialize a node containing a squeeze operator.
-	:param axes :   axes to squeeze between [-r;r-1] 
-					with r = input_tensor.nbDims()
-					& r in [-128 , 127]
-	:type axes : :py:class: List[Int]
-    :param name : name of the node.
-)mydelimiter");
+            Initialize a node containing a squeeze operator.
+            :param axes:   axes to squeeze between [-r;r-1]
+                            with r = input_tensor.nbDims()
+                            & r in [-128 , 127]
+            :type axes: :py:class: List[Int]
+            :param name: name of the node.
+            :type name: str
+        )mydelimiter");
 }
 } // namespace Aidge
-- 
GitLab


From 715b436a18e229d2c4da536b2b83c389c0252749 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 13:05:00 +0000
Subject: [PATCH 14/31] Switch multiple attribute name to follow snake case
 convention.

---
 include/aidge/operator/BitShift.hpp  | 6 +++---
 include/aidge/operator/Resize.hpp    | 8 ++++----
 include/aidge/operator/Squeeze.hpp   | 2 +-
 include/aidge/operator/Unsqueeze.hpp | 2 +-
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index 711cf8585..9368e3461 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 
 enum class BitShiftAttr {
     /**
-     * 
+     *
      */
     BitShiftdirection
 };
@@ -41,7 +41,7 @@ enum class BitShiftAttr {
  * - **InputTensor**: The tensor whose elements will be shifted.
  * - **ShiftAmount**: The tensor specifying the shift amount for each element.
  *
- * The shift is applied in the direction specified by the attribute `BitShiftdirection`, 
+ * The shift is applied in the direction specified by the attribute `BitShiftdirection`,
  * which can either be `left` or `right`.
  *
  * @see OperatorTensor
@@ -166,7 +166,7 @@ namespace {
  * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
  */
 template <>
-const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = { "BitShiftdirection" };
+const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = { "bit_shift_direction" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index c3c7838ef..89224f927 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -225,10 +225,10 @@ Resize(std::vector<float> scale = std::vector<float>(),
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
-    "coordinateTransformationMode",
-    "cubicCoeffA",
-    "InterpolationMode",
-    "PaddingMode"
+    "coordinate_transformation_mode",
+    "cubic_coeff_a",
+    "interpolation_mode",
+    "padding_mode"
 };
 }
 #endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 5c966edaf..e3c1f4de1 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -154,7 +154,7 @@ inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"Axes"};
+const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"axes"};
 }
 
 #endif // AIDGE_CORE_OPERATOR_SQUEEZE_H_
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index c07105405..c25800acb 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -152,7 +152,7 @@ inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"Axes"};
+const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"axes"};
 }
 
 #endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
-- 
GitLab


From c3ffeaeaba8ebcbcb1709e004e70879616cacd12 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 15:28:15 +0000
Subject: [PATCH 15/31] Update def to def_static for static functions.

---
 python_binding/operator/pybind_AvgPooling.cpp      | 10 +++++-----
 python_binding/operator/pybind_ConstantOfShape.cpp |  8 ++++----
 python_binding/operator/pybind_Squeeze.cpp         |  4 ++--
 python_binding/operator/pybind_Unsqueeze.cpp       |  4 ++--
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 24549e3f4..852b11303 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -31,17 +31,17 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
   const std::string pyClassName("AvgPooling" + std::to_string(DIM) + "DOp");
   const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName);
-  
+
   py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor>(
         m, pyClassName.c_str(),
         py::multiple_inheritance(),
         R"mydelimiter(
         Initialize an AvgPooling operator for a tensor.
 
-        This operator performs average pooling on the input tensor using the specified kernel dimensions 
+        This operator performs average pooling on the input tensor using the specified kernel dimensions
         and stride dimensions.
 
-        :param kernel_dims: The size of the kernel (filter) applied during pooling. 
+        :param kernel_dims: The size of the kernel (filter) applied during pooling.
                              Specifies the dimensions of the kernel (e.g., [3, 3] for 2D pooling).
         :type kernel_dims: List[int]
         :param stride_dims: The stride of the pooling operation. Specifies how much the kernel moves in each step.
@@ -52,8 +52,8 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
                   const std::array<DimSize_t, DIM> &>(),
             py::arg("kernel_dims"),
             py::arg("stride_dims") = create_array<DimSize_t, DIM>(1))
-    .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+    .def_static("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
+    .def_static("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
     .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index 07079d983..5a0e858f1 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -27,20 +27,20 @@ void init_ConstantOfShape(py::module &m) {
       R"mydelimiter(
       Initialize a ConstantOfShape operator.
 
-      :param value : Tensor with a given datatype that contains the value 
+      :param value : Tensor with a given datatype that contains the value
                      that will fill the output tensor.
       :type value : :py:class:`Tensor`
       )mydelimiter")
       .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
-      .def("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
-      .def("value", &ConstantOfShape_Op::value);
+      .def_static("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
+      .def_static("value", &ConstantOfShape_Op::value);
 
   m.def("ConstantOfShape", &ConstantOfShape, py::arg("value") = Tensor(0.f),
         py::arg("name") = "",
         R"mydelimiter(
         Initialize a node containing a ConstantOfShape operator.
 
-        :param value : Tensor with a given datatype that contains the value 
+        :param value : Tensor with a given datatype that contains the value
                        that will fill the output tensor.
         :type value : :py:class:`Tensor`
         :param name  : Name of the node.
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index 188ce745d..f7ee4d722 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -32,8 +32,8 @@ void init_Squeeze(py::module &m) {
     				& r in [-128 , 127]
     :type axes: :py:class: List[Int]
     )mydelimiter")
-    .def("get_inputs_name", &Squeeze_Op::getInputsName)
-    .def("get_outputs_name", &Squeeze_Op::getOutputsName)
+    .def_static("get_inputs_name", &Squeeze_Op::getInputsName)
+    .def_static("get_outputs_name", &Squeeze_Op::getOutputsName)
     .def("axes", &Squeeze_Op::axes);
 
     declare_registrable<Squeeze_Op>(m, "SqueezeOp");
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index 7ef8af8b6..c21a7bcfa 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -28,8 +28,8 @@ void init_Unsqueeze(py::module &m) {
             :type axes: :py:class: List[Int]
 		)mydelimiter")
       // Here we bind the methods of the Unsqueeze_Op that will want to access
-      .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
-      .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+      .def_static("get_inputs_name", &Unsqueeze_Op::getInputsName)
+      .def_static("get_outputs_name", &Unsqueeze_Op::getOutputsName)
       .def_readonly_static("Type", &Unsqueeze_Op::Type)
       ;
 
-- 
GitLab


From b9b8569073377ec1d37010188a47cedfd40a1739 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Tue, 18 Feb 2025 08:39:04 +0000
Subject: [PATCH 16/31] Add attributesName function in C++ and Python API.

---
 include/aidge/operator/ArgMax.hpp             |   8 ++
 include/aidge/operator/AvgPooling.hpp         |   8 ++
 include/aidge/operator/BatchNorm.hpp          |   8 ++
 include/aidge/operator/BitShift.hpp           |  10 +-
 include/aidge/operator/Cast.hpp               |   8 ++
 include/aidge/operator/Clip.hpp               |   8 ++
 include/aidge/operator/Concat.hpp             |   8 ++
 include/aidge/operator/ConstantOfShape.hpp    |  12 +-
 include/aidge/operator/Conv.hpp               |   8 ++
 include/aidge/operator/ConvDepthWise.hpp      |   8 ++
 include/aidge/operator/DepthToSpace.hpp       |   8 ++
 include/aidge/operator/Flatten.hpp            |   8 ++
 include/aidge/operator/Fold.hpp               |   8 ++
 include/aidge/operator/Gather.hpp             |   8 ++
 include/aidge/operator/GridSample.hpp         |   8 ++
 include/aidge/operator/Heaviside.hpp          |   8 ++
 include/aidge/operator/LRN.hpp                |  10 +-
 include/aidge/operator/LeakyReLU.hpp          |   8 ++
 include/aidge/operator/MaxPooling.hpp         |   8 ++
 include/aidge/operator/Memorize.hpp           |   8 ++
 include/aidge/operator/Pad.hpp                |   8 ++
 include/aidge/operator/Pop.hpp                |   8 ++
 include/aidge/operator/ReduceMean.hpp         |   8 ++
 include/aidge/operator/ReduceSum.hpp          |   8 ++
 include/aidge/operator/Reshape.hpp            |   8 ++
 include/aidge/operator/Resize.hpp             |   8 ++
 include/aidge/operator/Scaling.hpp            |   8 ++
 include/aidge/operator/Shape.hpp              |   8 ++
 include/aidge/operator/Slice.hpp              |   8 ++
 include/aidge/operator/Softmax.hpp            |   8 ++
 include/aidge/operator/Split.hpp              |   8 ++
 include/aidge/operator/Squeeze.hpp            |   8 ++
 include/aidge/operator/Stack.hpp              |   8 ++
 include/aidge/operator/Transpose.hpp          |   8 ++
 include/aidge/operator/Unfold.hpp             |   8 ++
 include/aidge/operator/Unsqueeze.hpp          |   8 ++
 python_binding/operator/pybind_ArgMax.cpp     |   8 ++
 python_binding/operator/pybind_AvgPooling.cpp |   9 ++
 python_binding/operator/pybind_BatchNorm.cpp  |   9 ++
 python_binding/operator/pybind_BitShift.cpp   |  10 +-
 python_binding/operator/pybind_Cast.cpp       |  10 +-
 python_binding/operator/pybind_Clip.cpp       | 127 ++++++++++--------
 python_binding/operator/pybind_Concat.cpp     |   9 ++
 .../operator/pybind_ConstantOfShape.cpp       |  12 +-
 python_binding/operator/pybind_Conv.cpp       |   9 ++
 .../operator/pybind_ConvDepthWise.cpp         |   9 ++
 .../operator/pybind_DepthToSpace.cpp          |   9 ++
 python_binding/operator/pybind_Gather.cpp     |   9 ++
 python_binding/operator/pybind_GridSample.cpp |   9 ++
 python_binding/operator/pybind_Heaviside.cpp  |   9 ++
 python_binding/operator/pybind_LRN.cpp        |   9 ++
 python_binding/operator/pybind_LeakyReLU.cpp  |   9 ++
 python_binding/operator/pybind_MaxPooling.cpp |   9 ++
 python_binding/operator/pybind_Memorize.cpp   |  10 +-
 python_binding/operator/pybind_Pad.cpp        |   8 ++
 python_binding/operator/pybind_Pop.cpp        |   9 ++
 python_binding/operator/pybind_ReduceMean.cpp |   8 ++
 python_binding/operator/pybind_ReduceSum.cpp  |   9 ++
 python_binding/operator/pybind_Reshape.cpp    |   9 ++
 python_binding/operator/pybind_Resize.cpp     |  16 ++-
 python_binding/operator/pybind_Scaling.cpp    |   9 ++
 python_binding/operator/pybind_Shape.cpp      |   9 ++
 python_binding/operator/pybind_Slice.cpp      |   9 ++
 python_binding/operator/pybind_Softmax.cpp    |   9 ++
 python_binding/operator/pybind_Split.cpp      |   9 ++
 python_binding/operator/pybind_Squeeze.cpp    |   9 ++
 python_binding/operator/pybind_Stack.cpp      |   9 ++
 python_binding/operator/pybind_Transpose.cpp  |   8 ++
 python_binding/operator/pybind_Unsqueeze.cpp  |   8 ++
 69 files changed, 647 insertions(+), 72 deletions(-)

diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 7358899a9..6d24d87bd 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -177,6 +177,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ArgMaxAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 981f71762..bd74dbdbf 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -175,6 +175,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::AvgPoolingAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index ddffaeb02..995179d7f 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -152,6 +152,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::BatchNormAttr>::data; 
+	}
 };
 
 extern template class Aidge::BatchNorm_Op<2>;
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index 9368e3461..d066507dd 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -147,6 +147,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "OutputTensor" };
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::BitShiftAttr>::data;
+	}
 };
 
 /**
@@ -166,7 +174,7 @@ namespace {
  * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
  */
 template <>
-const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = { "bit_shift_direction" };
+const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {"bit_shift_direction"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 1f934fbc7..12c3a280a 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -137,6 +137,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::CastAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 0825b85bb..93c042d86 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -148,6 +148,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "data_output" };
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ClipAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index ad31ef1a3..7a4ea74a4 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -169,6 +169,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "data_output" };
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConcatAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index 18e626544..d837d108a 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -63,7 +63,7 @@ private:
 public:
   /**
    * @brief constructor for ConstantOfShape_op
-   * @param[in] value : a scalar tensor which holds the value that will 
+   * @param[in] value : a scalar tensor which holds the value that will
    * fill the output tensor
    */
   ConstantOfShape_Op(const Tensor &value = Tensor(0.f))
@@ -116,6 +116,14 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"constant_of_shape"};
   }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConstantOfShapeAttr>::data;
+	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
@@ -129,7 +137,7 @@ inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"Value"};
+const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
 }
 
 #endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 8984ebd08..7beea057e 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -209,6 +209,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConvAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 03e821041..3090b9feb 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -189,6 +189,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConvDepthWiseAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 769dad767..cc51ea180 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -164,6 +164,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::DepthToSpaceAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index a7f5c6435..10ce58ad0 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -155,6 +155,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::FlattenAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 3b5b9449d..9d2d4e0df 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -210,6 +210,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::FoldAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index dc3e1a814..3842a041e 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -184,6 +184,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::GatherAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 999f7bba1..28c5fb5e5 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -170,6 +170,14 @@ public:
 	static const std::vector<std::string> getOutputsName() {
 		return {"data_output"};
 	}
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::GridSampleAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index 94eaa400a..874853c4e 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -110,6 +110,14 @@ public:
         return {"output"};
     }
 
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::HeavisideAttr>::data; 
+	}
+
     /**
      * @brief Get the attributes of the operator.
      */
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index 369da5f97..9019c089b 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -158,6 +158,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::LRNAttr>::data; 
+	}
 };
 
 /**
@@ -176,7 +184,7 @@ namespace {
  * @brief EnumStrings specialization for LRNAttr.
  */
 template <>
-const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size"};
+const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size", nullptr};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 46730d026..5381b3cb1 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -115,6 +115,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::LeakyReLUAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 8503b1be1..11b3ace26 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -182,6 +182,14 @@ public:
      * @return A vector of output tensors names.
      */
     static const std::vector<std::string> getOutputsName(){ return {"data_output"}; }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::MaxPoolingAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index deefc0077..10bbfce85 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -240,6 +240,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output", "data_output_rec"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::MemorizeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index c1ed3500c..417e9664c 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -216,6 +216,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::PadAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 0624286f7..08d40ba79 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -198,6 +198,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::PopAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 6aded3638..c6d875719 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -165,6 +165,14 @@ public:
         return {"data_output"};
     }
 
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ReduceMeanAttr>::data; 
+	}
+
     virtual ~ReduceMean_Op() noexcept;
 };
 
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 5a3674b21..72f6bf9b2 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -170,6 +170,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ReduceSumAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index c170ad79e..51623737e 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -176,6 +176,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ReshapeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 89224f927..3a4ef3771 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -191,6 +191,14 @@ class Resize_Op
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ResizeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index b33fb5841..c1f4514c9 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -134,6 +134,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ScalingAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 609e354d5..84d497abf 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -163,6 +163,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ShapeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index d32bc4fe2..ea4d21e9a 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -203,6 +203,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SliceAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 290132690..a7d8283a0 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -130,6 +130,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SoftmaxAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 3c6b52d3c..9f2beb3aa 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -173,6 +173,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output_0", "data_output_n"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SplitAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index e3c1f4de1..9a2cc8f54 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -142,6 +142,14 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"squeezed"};
   }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SqueezeAttr>::data; 
+	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 71e4e780a..0e420789d 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -212,6 +212,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::StackAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index ab3b18e51..d760ccd0d 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -166,6 +166,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::TransposeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 333413b1d..bea32c6cc 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -199,6 +199,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::UnfoldAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index c25800acb..8c5909182 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -140,6 +140,14 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"unsqueezed"};
   }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::UnsqueezeAttr>::data; 
+	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
index 3de54afd7..75f325749 100644
--- a/python_binding/operator/pybind_ArgMax.cpp
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -43,6 +43,14 @@ void init_ArgMax(py::module &m) {
     .def(py::init<std::int32_t, bool, bool>(), py::arg("axis"), py::arg("keep_dims"), py::arg("select_last_index"))
     .def_static("get_inputs_name", &ArgMax_Op::getInputsName)
     .def_static("get_outputs_name", &ArgMax_Op::getOutputsName)
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = ArgMax_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<ArgMaxAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     ;
   declare_registrable<ArgMax_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 852b11303..8551f3eb4 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -54,6 +54,15 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
             py::arg("stride_dims") = create_array<DimSize_t, DIM>(1))
     .def_static("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = AvgPooling_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<AvgPoolingAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 3339db0f2..199ef8134 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -42,6 +42,15 @@ void declare_BatchNormOp(py::module& m) {
             py::arg("training_mode"))
         .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = BatchNorm_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<BatchNormAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type);
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
index b4f6c90e5..f2f4b223d 100644
--- a/python_binding/operator/pybind_BitShift.cpp
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -35,7 +35,15 @@ void init_BitShift(py::module &m) {
         .def(py::init<BitShift_Op::BitShiftDirection>(), py::arg("direction"))
         .def("direction", &BitShift_Op::direction, "Get the direction of the bit shift (left or right).")
         .def_static("get_inputs_name", &BitShift_Op::getInputsName, "Get the names of the input tensors.")
-        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.");
+        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.")
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = BitShift_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<BitShiftAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		});
 
     // Enum binding under BitShiftOp class
     py::enum_<BitShift_Op::BitShiftDirection>(pyBitShiftOp, "BitShiftDirection")
diff --git a/python_binding/operator/pybind_Cast.cpp b/python_binding/operator/pybind_Cast.cpp
index 960a084ff..1e0ad7f9b 100644
--- a/python_binding/operator/pybind_Cast.cpp
+++ b/python_binding/operator/pybind_Cast.cpp
@@ -32,7 +32,15 @@ void init_Cast(py::module &m) {
         .def(py::init<DataType>(), py::arg("target_type"))
         .def("target_type", &Cast_Op::targetType, "Get the targeted type, output tensor data type")
         .def_static("get_inputs_name", &Cast_Op::getInputsName, "Get the names of the input tensors.")
-        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.");
+        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.")
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Cast_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<CastAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		});
 
     // Binding for the Cast function
     m.def("Cast", &Cast, py::arg("target_type"), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp
index 7c4563a98..a22a002d4 100644
--- a/python_binding/operator/pybind_Clip.cpp
+++ b/python_binding/operator/pybind_Clip.cpp
@@ -1,59 +1,68 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <pybind11/pybind11.h>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Clip.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/utils/Types.h"
-
-namespace py = pybind11;
-namespace Aidge {
-
-void init_Clip(py::module& m) {
-    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance(),
-        R"mydelimiter(
-        Initialize a Clip operator.
-
-        :param min : Minimum clipping value. Default is the lowest possible float value.
-        :type min : :py:class:`float`
-        :param max : Maximum clipping value. Default is the highest possible float value.
-        :type max : :py:class:`float`
-        )mydelimiter")
-    .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
-    .def_static("get_inputs_name", &Clip_Op::getInputsName)
-    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
-    .def("min", &Clip_Op::min, py::return_value_policy::reference_internal)
-    .def("max", &Clip_Op::max, py::return_value_policy::reference_internal);
-
-    declare_registrable<Clip_Op>(m, "ClipOp");
-
-    m.def("Clip", &Clip, py::arg("name") = "",
-        py::arg("min") = std::numeric_limits<float>::lowest(),
-        py::arg("max") = std::numeric_limits<float>::max(),
-        R"mydelimiter(
-        ClipOp is a tensor operator that performs a clipping operation on tensor elements.
-        This class allows limiting tensor values to a specified range, defined by the `min` 
-        and `max` parameters. Values outside this range are replaced by the corresponding 
-        limit values. When `min` is greater than `max`, the clip operator sets all the 'input' values to the value of `max`.
-
-        :param min: Minimum clipping value.
-        :type min: :py:class:`float`
-        :param max: Maximum clipping value.
-        :type max: :py:class:`float`
-        :param name: Name of the node.
-        :type name: :py:class:`str`
-        )mydelimiter");
-}
-
-}  // namespace Aidge
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Clip.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Clip(py::module& m) {
+    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Clip operator.
+
+        :param min : Minimum clipping value. Default is the lowest possible float value.
+        :type min : :py:class:`float`
+        :param max : Maximum clipping value. Default is the highest possible float value.
+        :type max : :py:class:`float`
+        )mydelimiter")
+    .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
+    .def_static("get_inputs_name", &Clip_Op::getInputsName)
+    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Clip_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ClipAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
+    .def("min", &Clip_Op::min, py::return_value_policy::reference_internal)
+    .def("max", &Clip_Op::max, py::return_value_policy::reference_internal);
+
+    declare_registrable<Clip_Op>(m, "ClipOp");
+
+    m.def("Clip", &Clip, py::arg("name") = "",
+        py::arg("min") = std::numeric_limits<float>::lowest(),
+        py::arg("max") = std::numeric_limits<float>::max(),
+        R"mydelimiter(
+        ClipOp is a tensor operator that performs a clipping operation on tensor elements.
+        This class allows limiting tensor values to a specified range, defined by the `min` 
+        and `max` parameters. Values outside this range are replaced by the corresponding 
+        limit values. When `min` is greater than `max`, the clip operator sets all the 'input' values to the value of `max`.
+
+        :param min: Minimum clipping value.
+        :type min: :py:class:`float`
+        :param max: Maximum clipping value.
+        :type max: :py:class:`float`
+        :param name: Name of the node.
+        :type name: :py:class:`str`
+        )mydelimiter");
+}
+
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index d2410b03a..236f16922 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -34,6 +34,15 @@ void init_Concat(py::module& m) {
              py::arg("axis") = 0)
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
         .def_static("get_outputs_name", &Concat_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Concat_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ConcatAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Concat_Op::Type);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index 5a0e858f1..b185f2f80 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -31,9 +31,17 @@ void init_ConstantOfShape(py::module &m) {
                      that will fill the output tensor.
       :type value : :py:class:`Tensor`
       )mydelimiter")
-      .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
+      .def_static("get_inputs_name", &ConstantOfShape_Op::getInputsName)
       .def_static("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
-      .def_static("value", &ConstantOfShape_Op::value);
+      .def_static("attributes_name", []() {
+        std::vector<std::string> result;
+        auto attributes = ConstantOfShape_Op::attributesName();
+        for (size_t i = 0; i < size(EnumStrings<ConstantOfShapeAttr>::data); ++i) {
+          result.emplace_back(attributes[i]);
+        }
+        return result;
+      })
+      .def("value", &ConstantOfShape_Op::value);
 
   m.def("ConstantOfShape", &ConstantOfShape, py::arg("value") = Tensor(0.f),
         py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 6ab073be6..e65a74c0c 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -43,6 +43,15 @@ void declare_ConvOp(py::module &m) {
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
         .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Conv_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ConvAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
         .def_readonly_static("Type", &Conv_Op<DIM>::Type)
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 5e24431d7..7ddbefd3d 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -56,6 +56,15 @@ void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = ConvDepthWise_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ConvDepthWiseAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
   .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels)
   .def_readonly_static("Type", &ConvDepthWise_Op<DIM>::Type);
 
diff --git a/python_binding/operator/pybind_DepthToSpace.cpp b/python_binding/operator/pybind_DepthToSpace.cpp
index efb8a7406..d33386711 100644
--- a/python_binding/operator/pybind_DepthToSpace.cpp
+++ b/python_binding/operator/pybind_DepthToSpace.cpp
@@ -37,6 +37,15 @@ void declare_DepthToSpace(py::module &m) {
         }), py::arg("block_size"), py::arg("mode") = "CRD")
     .def_static("get_inputs_name", &DepthToSpace_Op::getInputsName)
     .def_static("get_outputs_name", &DepthToSpace_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = DepthToSpace_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<DepthToSpaceAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &DepthToSpace_Op::Type)
     .def("__repr__", [](DepthToSpace_Op& b) {
         return fmt::format("Operator(type='{}')", b.Type);
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index fed44a1e2..6afeb42a7 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -44,6 +44,15 @@ void init_Gather(py::module& m) {
                 py::arg("gathered_shape"))
         .def_static("get_inputs_name", &Gather_Op::getInputsName)
         .def_static("get_outputs_name", &Gather_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Gather_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<GatherAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Gather_Op::Type);
 
     declare_registrable<Gather_Op>(m, "GatherOp");
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 3464941dd..f4f0335fd 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -65,6 +65,15 @@ void declare_GridSampleOp(py::module &m) {
             py::arg("align_corners") = false)
         .def_static("get_inputs_name", &GridSample_Op::getInputsName)
         .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = GridSample_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<GridSampleAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &GridSample_Op::Type)
         ;
 
diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp
index cbc2502aa..b8d7f1d80 100644
--- a/python_binding/operator/pybind_Heaviside.cpp
+++ b/python_binding/operator/pybind_Heaviside.cpp
@@ -37,6 +37,15 @@ void init_Heaviside(py::module &m) {
         .def(py::init<float>(), py::arg("value"))
         .def_static("get_inputs_name", &Heaviside_Op::getInputsName)
         .def_static("get_outputs_name", &Heaviside_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Heaviside_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<HeavisideAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Heaviside_Op::Type);
 
     declare_registrable<Heaviside_Op>(m, "HeavisideOp");
diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp
index bb04ed1c5..f802152ba 100644
--- a/python_binding/operator/pybind_LRN.cpp
+++ b/python_binding/operator/pybind_LRN.cpp
@@ -30,6 +30,15 @@ void init_LRN(py::module& m) {
         .def(py::init<std::int32_t>(), py::arg("size"))
         .def_static("get_inputs_name", &LRN_Op::getInputsName)
         .def_static("get_outputs_name", &LRN_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = LRN_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<LRNAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &LRN_Op::Type);
 
     m.def("LRN", &LRN, py::arg("size"), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 564fd90be..ab81052d2 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -30,6 +30,15 @@ void init_LeakyReLU(py::module& m) {
         .def(py::init<float>(), py::arg("negative_slope"))
         .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
         .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = LeakyReLU_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<LeakyReLUAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &LeakyReLU_Op::Type);
 
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 8834625a8..305d8def3 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -48,6 +48,15 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("ceil_mode"))
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+
+  .def_static("attributes_name", []() {
+    std::vector<std::string> result;
+    auto attributes = MaxPooling_Op<DIM>::attributesName();
+    for (size_t i = 0; i < size(EnumStrings<MaxPoolingAttr>::data); ++i) {
+      result.emplace_back(attributes[i]);
+    }
+    return result;
+  })
   .def_readonly_static("Type", &MaxPooling_Op<DIM>::Type);
   
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
index 3ac112211..f583602c9 100644
--- a/python_binding/operator/pybind_Memorize.cpp
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -23,7 +23,15 @@ void init_Memorize(py::module& m) {
     py::class_<Memorize_Op, std::shared_ptr<Memorize_Op>, OperatorTensor>(m, "MemorizeOp", py::multiple_inheritance())
         .def(py::init<const std::uint32_t>(), py::arg("end_step"))
         .def_static("get_inputs_name", &Memorize_Op::getInputsName)
-        .def_static("get_outputs_name", &Memorize_Op::getOutputsName);
+        .def_static("get_outputs_name", &Memorize_Op::getOutputsName)
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Memorize_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<MemorizeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		});
 
     declare_registrable<Memorize_Op>(m, "MemorizeOp");
 
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index fe899a75a..7b37bb206 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -50,6 +50,14 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Pad_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<PadAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Pad_Op<DIM>::Type);
 
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 2040f642b..20606d24d 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -23,6 +23,15 @@ void init_Pop(py::module& m) {
     .def(py::init<>())
     .def_static("get_inputs_name", &Pop_Op::getInputsName)
     .def_static("get_outputs_name", &Pop_Op::getOutputsName)
+
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = Pop_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<PopAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     .def_readonly_static("Type", &Pop_Op::Type);
 
     m.def("Pop", &Pop, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 028e45755..d29f6bfe7 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -43,6 +43,14 @@ void declare_ReduceMeanOp(py::module &m) {
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes") = std::vector<std::int32_t>(), py::arg("keep_dims") = true, py::arg("noop_with_empty_axes") = false)
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = ReduceMean_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<ReduceMeanAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     .def_readonly_static("Type", &ReduceMean_Op::Type)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
index eaa57ef1c..f139f2e7b 100644
--- a/python_binding/operator/pybind_ReduceSum.cpp
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -43,6 +43,15 @@ void init_ReduceSum(py::module &m) {
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
     .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
+
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = ReduceSum_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<ReduceSumAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     ;
   declare_registrable<ReduceSum_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index e3244f5dd..d263796ce 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -35,6 +35,15 @@ void init_Reshape(py::module& m) {
     .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
     .def_static("get_inputs_name", &Reshape_Op::getInputsName)
     .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Reshape_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ReshapeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Reshape_Op::Type);
 
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 2aa626098..10a60e1f9 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -25,10 +25,18 @@ namespace Aidge {
 void init_Resize(py::module &m) {
   py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
           m, "ResizeOp", py::multiple_inheritance())
-          .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
-          .def_static("get_inputs_name", &Resize_Op::getInputsName)
-          .def_static("get_outputs_name", &Resize_Op::getOutputsName)
-          .def_readonly_static("Type", &Resize_Op::Type);
+        .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
+        .def_static("get_inputs_name", &Resize_Op::getInputsName)
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Resize_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ResizeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
+        .def_readonly_static("Type", &Resize_Op::Type);
 
   declare_registrable<Resize_Op>(m, "ResizeOp");
 
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index c555bca89..ba975bb06 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -41,6 +41,15 @@ void init_Scaling(py::module& m) {
              py::arg("is_output_unsigned"))
         .def_static("get_inputs_name", &Scaling_Op::getInputsName)
         .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Scaling_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ScalingAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Scaling_Op::Type);
 
     declare_registrable<Scaling_Op>(m, "ScalingOp");
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index cc7669a24..3c8974bf0 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -34,6 +34,15 @@ void init_Shape(py::module& m) {
         .def(py::init<const std::int64_t, const std::int64_t>(), py::arg("start"), py::arg("end"))
         .def_static("get_inputs_name", &Shape_Op::getInputsName)
         .def_static("get_outputs_name", &Shape_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Shape_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ShapeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Shape_Op::Type);
 
     declare_registrable<Shape_Op>(m, "ShapeOp");
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index f01751b86..1cfd63f65 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -45,6 +45,15 @@ void init_Slice(py::module& m) {
                   py::arg("steps") = std::vector<std::int64_t>())
     .def_static("get_inputs_name", &Slice_Op::getInputsName)
     .def_static("get_outputs_name", &Slice_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Slice_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SliceAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Slice_Op::Type);
 
     declare_registrable<Slice_Op>(m, "SliceOp");
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 093f448e4..7a4a687fd 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -30,6 +30,15 @@ void init_Softmax(py::module& m) {
         .def(py::init<std::int32_t>(), py::arg("axis"))
         .def_static("get_inputs_name", &Softmax_Op::getInputsName)
         .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Softmax_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SoftmaxAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Softmax_Op::Type);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index f02a699e4..052fa277e 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -36,6 +36,15 @@ void init_Split(py::module& m) {
             py::arg("split"))
     .def_static("get_inputs_name", &Split_Op::getInputsName)
     .def_static("get_outputs_name", &Split_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Split_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SplitAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Split_Op::Type);
 
     declare_registrable<Split_Op>(m, "SplitOp");
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index f7ee4d722..7808c78da 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -34,6 +34,15 @@ void init_Squeeze(py::module &m) {
     )mydelimiter")
     .def_static("get_inputs_name", &Squeeze_Op::getInputsName)
     .def_static("get_outputs_name", &Squeeze_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Squeeze_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SqueezeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def("axes", &Squeeze_Op::axes);
 
     declare_registrable<Squeeze_Op>(m, "SqueezeOp");
diff --git a/python_binding/operator/pybind_Stack.cpp b/python_binding/operator/pybind_Stack.cpp
index c9bd969fa..026167446 100644
--- a/python_binding/operator/pybind_Stack.cpp
+++ b/python_binding/operator/pybind_Stack.cpp
@@ -26,6 +26,15 @@ void init_Stack(py::module &m) {
         .def(py::init<const std::uint32_t>(), py::arg("max_elements"))
         .def_static("get_inputs_name", &StackOp::getInputsName)
         .def_static("get_outputs_name", &StackOp::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = StackOp::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<StackAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &StackOp::s_type);
 
     m.def("Stack",
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 20794a155..1882aa4c4 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -38,6 +38,14 @@ void declare_Transpose(py::module &m) {
     .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order")=std::vector<std::size_t>())
     .def_static("get_inputs_name", &Transpose_Op::getInputsName)
     .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = Transpose_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<TransposeAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
   m.def("Transpose", &Transpose, py::arg("output_dims_order")=std::vector<std::size_t>(), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index c21a7bcfa..1ef94202c 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -30,6 +30,14 @@ void init_Unsqueeze(py::module &m) {
       // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def_static("get_inputs_name", &Unsqueeze_Op::getInputsName)
       .def_static("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+        .def_static("attributes_name", []() {
+            std::vector<std::string> result;
+            auto attributes = Unsqueeze_Op::attributesName();
+            for (size_t i = 0; i < size(EnumStrings<UnsqueezeAttr>::data); ++i) {
+                result.emplace_back(attributes[i]);
+            }
+            return result;
+        })
       .def_readonly_static("Type", &Unsqueeze_Op::Type)
       ;
 
-- 
GitLab


From 740a843e984b3971a1e08c3dd3b4fc203fd5f667 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Tue, 18 Feb 2025 09:03:28 +0000
Subject: [PATCH 17/31] [Test] Add a test to ensure attribute names follow
 the snake_case convention.

---
 aidge_core/unit_tests/test_naming.py | 40 ++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
 create mode 100644 aidge_core/unit_tests/test_naming.py

diff --git a/aidge_core/unit_tests/test_naming.py b/aidge_core/unit_tests/test_naming.py
new file mode 100644
index 000000000..af86dd050
--- /dev/null
+++ b/aidge_core/unit_tests/test_naming.py
@@ -0,0 +1,40 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import inspect
+import re
+
+def is_snake_case(s: str) -> bool:
+    return bool(re.fullmatch(r'^[a-z]+(_[a-z]+)*$', s))
+
+class test_naming(unittest.TestCase):
+    """Test that operator attribute names follow the snake_case convention
+    """
+    def setUp(self):
+        pass
+    def tearDown(self):
+        pass
+
+    def test_attributes_name(self):
+
+        for obj in inspect.getmembers(aidge_core):
+            if (inspect.isclass(obj[1]) and issubclass(obj[1], aidge_core.Operator) and obj[1] is not aidge_core.Operator) and hasattr(obj[1], "attributes_name"):
+                print(obj[0])
+                print(obj[1].attributes_name())
+                for attr_name in obj[1].attributes_name():
+                    self.assertTrue(is_snake_case(attr_name), f"Operator {obj[0]} has an attribute {attr_name} that is not in snake_case.")
+
+
+
+        pass
+if __name__ == '__main__':
+    unittest.main()
-- 
GitLab


From 629ed0d9c138f45cbf9592bf86996e55989fec74 Mon Sep 17 00:00:00 2001
From: Cyril Moineau <cyril.moineau@cea.fr>
Date: Tue, 18 Feb 2025 09:11:37 +0000
Subject: [PATCH 18/31] Apply 1 suggestion(s) to 1 file(s)

---
 aidge_core/unit_tests/test_naming.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/aidge_core/unit_tests/test_naming.py b/aidge_core/unit_tests/test_naming.py
index af86dd050..eed7180ce 100644
--- a/aidge_core/unit_tests/test_naming.py
+++ b/aidge_core/unit_tests/test_naming.py
@@ -35,6 +35,5 @@ class test_naming(unittest.TestCase):
 
 
 
-        pass
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab


From 9b3579590d612d89cd36f42d47bb396670ef14af Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Wed, 19 Feb 2025 09:57:29 +0000
Subject: [PATCH 19/31] Move EnumStrings attribute declarations before their
 use, for clang compatibility.

---
 include/aidge/operator/ArgMax.hpp             | 26 +++---
 include/aidge/operator/AvgPooling.hpp         | 26 +++---
 include/aidge/operator/BatchNorm.hpp          | 14 +--
 include/aidge/operator/BitShift.hpp           | 18 ++--
 include/aidge/operator/Cast.hpp               | 14 +--
 include/aidge/operator/Clip.hpp               | 21 +++--
 include/aidge/operator/Concat.hpp             | 24 ++---
 include/aidge/operator/ConstantOfShape.hpp    | 11 ++-
 include/aidge/operator/Conv.hpp               | 31 ++++---
 include/aidge/operator/ConvDepthWise.hpp      | 30 +++---
 include/aidge/operator/DepthToSpace.hpp       | 13 +--
 include/aidge/operator/Flatten.hpp            | 13 +--
 include/aidge/operator/Fold.hpp               | 28 +++---
 include/aidge/operator/Gather.hpp             | 12 ++-
 include/aidge/operator/GridSample.hpp         | 21 +++--
 include/aidge/operator/Heaviside.hpp          | 18 ++--
 include/aidge/operator/LRN.hpp                | 30 +++---
 include/aidge/operator/LeakyReLU.hpp          | 18 ++--
 include/aidge/operator/MaxPooling.hpp         | 19 ++--
 include/aidge/operator/Memorize.hpp           | 29 +++---
 include/aidge/operator/Pad.hpp                | 66 ++++++-------
 include/aidge/operator/Pop.hpp                | 23 ++---
 include/aidge/operator/Producer.hpp           | 92 +++++++++----------
 include/aidge/operator/ReduceMean.hpp         | 21 +++--
 include/aidge/operator/ReduceSum.hpp          | 15 +--
 include/aidge/operator/Reshape.hpp            | 29 +++---
 include/aidge/operator/Resize.hpp             | 23 ++---
 include/aidge/operator/Scaling.hpp            | 25 ++---
 include/aidge/operator/Shape.hpp              | 20 ++--
 include/aidge/operator/Slice.hpp              | 14 +--
 include/aidge/operator/Softmax.hpp            | 20 ++--
 include/aidge/operator/Split.hpp              | 19 ++--
 include/aidge/operator/Squeeze.hpp            | 14 +--
 include/aidge/operator/Stack.hpp              | 19 ++--
 include/aidge/operator/Transpose.hpp          | 21 +++--
 include/aidge/operator/Unfold.hpp             | 31 ++++---
 include/aidge/operator/Unsqueeze.hpp          | 14 +--
 include/aidge/operator/WeightInterleaving.hpp | 10 +-
 38 files changed, 463 insertions(+), 429 deletions(-)

diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 6d24d87bd..bc97e1f5b 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -41,20 +41,28 @@ enum class ArgMaxAttr {
      */
     SelectLastIndex
 };
-
+} // namespace Aidge
+/**
+ * @brief Provides string representations for the ArgMaxAttr enumeration.
+ */
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
+}
+namespace Aidge {
 /**
  * @brief Description of the ArgMax operation on a Tensor.
  *
  * The ArgMax operation identifies the index of the maximum value along a specified axis of a Tensor.
  *
- * The output of the ArgMax operation can retain the dimensionality of the input Tensor or reduce 
- * it by removing the specified axis. Additionally, in cases where multiple maximum values exist, 
+ * The output of the ArgMax operation can retain the dimensionality of the input Tensor or reduce
+ * it by removing the specified axis. Additionally, in cases where multiple maximum values exist,
  * the user can specify whether to select the first or the last occurrence of the maximum value.
  *
  * Attributes:
  * - `Axis`: The axis along which the ArgMax operation is performed. For example, if the axis is `0`,
  *   the operation is applied along rows; if it is `1`, it is applied along columns.
- * - `KeepDims`: A boolean indicating whether to retain the reduced axis as a dimension of size `1` 
+ * - `KeepDims`: A boolean indicating whether to retain the reduced axis as a dimension of size `1`
  *   (`true`) or to completely remove it (`false`).
  * - `SelectLastIndex`: A boolean indicating how to handle ties (multiple maximum values along the axis):
  *   - If `true`, the last index of the maximum value is selected.
@@ -183,7 +191,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ArgMaxAttr>::data; 
+		return EnumStrings<Aidge::ArgMaxAttr>::data;
 	}
 };
 
@@ -206,12 +214,6 @@ std::shared_ptr<Node> ArgMax(std::int32_t axis = 0,
 
 }  // namespace Aidge
 
-/**
- * @brief Provides string representations for the ArgMaxAttr enumeration.
- */
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index bd74dbdbf..c929e1b18 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -40,7 +40,18 @@ enum class AvgPoolingAttr {
      */
     KernelDims
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representation of the AvgPooling attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
+        "stride_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @brief Class representing an Average Pooling operation.
  *
@@ -181,7 +192,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::AvgPoolingAttr>::data; 
+		return EnumStrings<Aidge::AvgPoolingAttr>::data;
 	}
 };
 
@@ -224,15 +235,6 @@ extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
 extern template class Aidge::AvgPooling_Op<4>;
 
-namespace {
-/**
- * @brief String representation of the AvgPooling attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
-    "stride_dims",
-    "kernel_dims"
-};
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 995179d7f..3521c9b16 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -50,7 +50,12 @@ enum class BatchNormAttr {
    */
   TrainingMode
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
+}
+namespace Aidge {
 /**
  * @class BatchNorm_Op
  * @brief Implements the Batch Normalization (BN) operation, a technique used to normalize the inputs of a layer.
@@ -158,7 +163,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::BatchNormAttr>::data; 
+		return EnumStrings<Aidge::BatchNormAttr>::data;
 	}
 };
 
@@ -178,9 +183,4 @@ extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const bool, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const bool, const std::string&);
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_BATCHNORM_H_ */
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index d066507dd..3e9f8c3f2 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -32,7 +32,15 @@ enum class BitShiftAttr {
      */
     BitShiftdirection
 };
-
+}
+namespace {
+    /**
+     * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {"bit_shift_direction"};
+}
+namespace Aidge {
 /**
  * @class BitShift_Op
  * @brief A tensor operator to perform element-wise bitwise shift operations on tensors.
@@ -169,12 +177,6 @@ inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direc
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
- */
-template <>
-const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {"bit_shift_direction"};
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 12c3a280a..b2ffbb553 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -40,7 +40,12 @@ enum class CastAttr {
      */
     TargetType
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
+}
+namespace Aidge {
 /**
  * @brief Description of the Cast operation to convert a tensor's data type.
  *
@@ -143,7 +148,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::CastAttr>::data; 
+		return EnumStrings<Aidge::CastAttr>::data;
 	}
 };
 
@@ -157,9 +162,4 @@ std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name =
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 93c042d86..51ecb6eb3 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -33,14 +33,23 @@ enum class ClipAttr {
     Min,  /**< Minimum value for clipping. */
     Max   /**< Maximum value for clipping. */
 };
+}
+namespace {
+    /**
+     * @brief Specialization of EnumStrings for ClipAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
+}
 
+namespace Aidge {
 /**
  * @brief Description of the Clip operation to limit tensor values within a specified range.
  *
  * The Clip operator ensures tensor elements are within the range `[min, max]`.
  * - Values less than `min` are set to `min`.
  * - Values greater than `max` are set to `max`.
- * 
+ *
  * The input and output Tensors have the same dimensions.
  *
  * ### Attributes:
@@ -154,7 +163,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ClipAttr>::data; 
+		return EnumStrings<Aidge::ClipAttr>::data;
 	}
 };
 
@@ -173,12 +182,4 @@ std::shared_ptr<Aidge::Node> Clip(
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Specialization of EnumStrings for ClipAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CLIP_H_ */
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 7a4ea74a4..1f8a357a8 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -58,7 +58,17 @@ enum class ConcatAttr {
      */
     Axis
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief Specialization of EnumStrings for ConcatAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
+        "axis"
+    };
+}
+namespace Aidge {
 /**
  * @class Concat_Op
  * @brief Implements the Concat operation to concatenate multiple tensors along a specified axis.
@@ -175,7 +185,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ConcatAttr>::data; 
+		return EnumStrings<Aidge::ConcatAttr>::data;
 	}
 };
 
@@ -190,14 +200,4 @@ std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0,
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Specialization of EnumStrings for ConcatAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
-    "axis"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CONCAT_H_ */
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index d837d108a..6176f69dd 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -40,6 +40,12 @@ enum class ConstantOfShapeAttr {
   Value,
 };
 
+namespace {
+  template <>
+  const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
+  }
+  
+
 /**
  * @brief This operator's purpose is to generate a tensor of shape given via
  * input and filled with a given value set via attribute.
@@ -135,10 +141,5 @@ inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
-}
-
 #endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 7beea057e..135ff8860 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -40,15 +40,24 @@ enum class ConvAttr {
     DilationDims,   // The dilation dimensions
     KernelDims      // The kernel dimensions
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @class Conv_Op
  * @brief Convolution operator for performing a multi-dimensional convolution.
- * 
- * The Conv_Op class implements a convolution operator for tensors with customizable 
- * kernel dimensions, stride, and dilation values. The operator performs a convolution 
+ *
+ * The Conv_Op class implements a convolution operator for tensors with customizable
+ * kernel dimensions, stride, and dilation values. The operator performs a convolution
  * operation on the input tensor and produces an output tensor.
- * 
+ *
  * ### Attributes:
  * - `strideDims`: Stride for each dimension of the input.
  * - `dilationDims`: Dilation for each dimension of the input.
@@ -63,7 +72,7 @@ enum class ConvAttr {
  *      - Stride dimensions: {1, 1} (stride of 1 in both height and width)
  *      - Dilation dimensions: {1, 1} (no dilation)
  *      - Padding: None
- *      - Output shape: 
+ *      - Output shape:
  *         (1, 64, (32−3+2×0)/1+1, (32−3+2×0)/1+1) = (1, 64, 30, 30)
  *
  * @see OperatorTensor
@@ -215,7 +224,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ConvAttr>::data; 
+		return EnumStrings<Aidge::ConvAttr>::data;
 	}
 };
 
@@ -268,13 +277,5 @@ inline std::shared_ptr<Node> Conv(
 extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 3090b9feb..b307d67a6 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -34,15 +34,24 @@ enum class ConvDepthWiseAttr {
     DilationDims, // The dilation dimensions for the convolution.
     KernelDims    // The kernel dimensions for the convolution.
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @class ConvDepthWise_Op
  * @brief Depthwise Convolution operator for performing a multi-dimensional depthwise convolution.
- * 
- * The ConvDepthWise_Op class implements a depthwise convolution operator for tensors with customizable 
- * kernel dimensions, stride, and dilation values. It performs a depthwise convolution operation on the 
+ *
+ * The ConvDepthWise_Op class implements a depthwise convolution operator for tensors with customizable
+ * kernel dimensions, stride, and dilation values. It performs a depthwise convolution operation on the
  * input tensor and produces an output tensor.
- * 
+ *
  * ### Attributes:
  * - strideDims: Stride for each dimension of the input.
  * - dilationDims: Dilation for each dimension of the input.
@@ -195,7 +204,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ConvDepthWiseAttr>::data; 
+		return EnumStrings<Aidge::ConvDepthWiseAttr>::data;
 	}
 };
 
@@ -245,13 +254,4 @@ inline std::shared_ptr<Node> ConvDepthWise(
 extern template class Aidge::ConvDepthWise_Op<1>;
 extern template class Aidge::ConvDepthWise_Op<2>;
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index cc51ea180..c99f7bbb7 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -51,7 +51,12 @@ enum class DepthToSpaceAttr {
     BlockSize, /**< The block size for rearranging depth to spatial dimensions. */
     Mode       /**< The mode for depth-to-space transformation. */
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
+}
+namespace Aidge{
 /**
  * @class DepthToSpace_Op
  * @brief Represents the DepthToSpace operation to rearrange data from depth to spatial dimensions.
@@ -170,7 +175,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::DepthToSpaceAttr>::data; 
+		return EnumStrings<Aidge::DepthToSpaceAttr>::data;
 	}
 };
 
@@ -187,9 +192,5 @@ std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
-}
 
 #endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index 10ce58ad0..b61fc6912 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -54,7 +54,12 @@ enum class FlattenAttr {
      */
     Axis
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
+}
+namespace Aidge {
 /**
  * @brief Description the Flatten operation to reshape a tensor into a 2D matrix.
  *
@@ -161,7 +166,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::FlattenAttr>::data; 
+		return EnumStrings<Aidge::FlattenAttr>::data;
 	}
 };
 
@@ -179,9 +184,5 @@ std::shared_ptr<Node> Flatten(std::int64_t axis = 1,
                             const std::string &name = "");
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
-}
 
 #endif /* AIDGE_CORE_OPERATOR_FLATTEN_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 9d2d4e0df..2f9974e8e 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -64,7 +64,17 @@ enum class FoldAttr {
      */
     KernelDims
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
+        "output_dims",
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @class Fold_Op
  * @brief Implements the Fold operation to combine or transform tensor dimensions.
@@ -82,7 +92,7 @@ enum class FoldAttr {
  *       output height (out_h) = floor((input height - kernel height) / stride height) + 1
  *       output width (out_w) = floor((input width - kernel width) / stride width) + 1
  *      - The exact output shape will depend on these calculations for each spatial dimension (height, width) and the number of output channels.
- *         
+ *
  * @example:
  *  - Input shape: (1, 16, 32, 32)  // Batch size: 1, Channels: 16, Height: 32, Width: 32
  *  - Kernel dimensions: (3, 3)  // 3x3 kernel
@@ -216,13 +226,13 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::FoldAttr>::data; 
+		return EnumStrings<Aidge::FoldAttr>::data;
 	}
 };
 
 /**
  * @brief Create a Fold operation node.
- * 
+ *
  * This function creates a Fold operation node that applies a fold transformation
  * to a tensor based on the specified attributes.
  *
@@ -255,14 +265,4 @@ extern template class Aidge::Fold_Op<2>;
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
-    "output_dims",
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_FOLD_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 3842a041e..86fc7bc78 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -61,6 +61,12 @@ enum class GatherAttr {
     GatheredShape
 };
 
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
+}
+namespace Aidge {
 /**
  * @brief Description for the Gather operation on an input tensor.
  *
@@ -190,7 +196,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::GatherAttr>::data; 
+		return EnumStrings<Aidge::GatherAttr>::data;
 	}
 };
 
@@ -213,9 +219,5 @@ std::shared_ptr<Node> Gather(std::int8_t axis = 0,
 
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 28c5fb5e5..066422311 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -29,6 +29,16 @@ enum class GridSampleAttr {
 	PaddingMode,	// Specifies how to handle out-of-boundary grid values.
 	AlignCorners	// Determines whether grid values are normalized to align with the image corners.
 };
+} // namespace Aidge
+namespace {
+	template <>
+	const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
+		"mode",
+		"padding_mode",
+		"align_corners"
+	};
+}
+namespace Aidge {
 
 /**
  * @class GridSample_Op
@@ -176,7 +186,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::GridSampleAttr>::data; 
+		return EnumStrings<Aidge::GridSampleAttr>::data;
 	}
 };
 
@@ -197,13 +207,4 @@ std::shared_ptr<Node> GridSample(
 
 } // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
-    "mode",
-    "padding_mode",
-    "align_corners"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_ */
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index 874853c4e..806ed47f3 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -31,6 +31,15 @@ enum class HeavisideAttr {
      */
     Value
 };
+} // namespace Aidge
+namespace {
+    /**
+     * @brief Define string representations for Heaviside attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
+}
+namespace Aidge {
 
 /**
  * @class Heaviside_Op
@@ -115,7 +124,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::HeavisideAttr>::data; 
+		return EnumStrings<Aidge::HeavisideAttr>::data;
 	}
 
     /**
@@ -149,12 +158,5 @@ std::shared_ptr<Node> Heaviside(float value, const std::string &name = "");
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Define string representations for Heaviside attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index 9019c089b..6c82b6b46 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -30,20 +30,28 @@ enum class LRNAttr {
     Bias,   ///< Constant bias added to the normalization term.
     Size    ///< Number of channels to normalize over.
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for LRNAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size", nullptr};
+}
+namespace Aidge {
 /**
  * @brief Description of a Local Response Normalization (LRN) operation on an input Tensor.
  *
- * LRN is a normalization technique that applies across channels in a local region 
- * to enhance generalization and promote competition between neurons. It is commonly 
+ * LRN is a normalization technique that applies across channels in a local region
+ * to enhance generalization and promote competition between neurons. It is commonly
  * used in Convolutional Neural Networks (CNNs).
  *
  * For each element x in the input Tensor, the function is defined as:
  * `f(x) = x / (bias + alpha * sum(x_i^2))^beta`, where:
  * - `x` is the current element being normalized.
- * - The summation `sum(x_i^2)` is taken over a local region of `size` channels 
+ * - The summation `sum(x_i^2)` is taken over a local region of `size` channels
  *   surrounding `x` (both before and after the current channel, if available).
- * - `bias`, `alpha`, and `beta` are scalar hyperparameters controlling the 
+ * - `bias`, `alpha`, and `beta` are scalar hyperparameters controlling the
  *   normalization behavior.
  *
  * Parameters:
@@ -52,7 +60,7 @@ enum class LRNAttr {
  * - `alpha`: A scaling factor for the squared sum of elements in the local region.
  * - `beta`: The exponent applied to the normalization term.
  *
- * The input and output Tensors have the same shape. If the input Tensor has shape `(N, C, H, W)`, 
+ * The input and output Tensors have the same shape. If the input Tensor has shape `(N, C, H, W)`,
  * the output Tensor will also have shape `(N, C, H, W)`.
  *
  * @see OperatorTensor
@@ -164,7 +172,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::LRNAttr>::data; 
+		return EnumStrings<Aidge::LRNAttr>::data;
 	}
 };
 
@@ -179,12 +187,4 @@ std::shared_ptr<Node> LRN(std::int32_t size, const std::string& name = "");
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for LRNAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size", nullptr};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 5381b3cb1..acf9bae7f 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -30,7 +30,13 @@ enum class LeakyReLUAttr {
      */
     NegativeSlope
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
+        = {"negative_slope"};
+}
+namespace Aidge{
 /**
  * @class LeakyReLU_Op
  * @brief Implements the LeakyReLU activation function.
@@ -77,7 +83,7 @@ public:
     /**
      * @brief Copy-constructor.
      * @param[in] op LeakyReLU_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
      * The new operator has no associated input.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op);
@@ -121,7 +127,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::LeakyReLUAttr>::data; 
+		return EnumStrings<Aidge::LeakyReLUAttr>::data;
 	}
 };
 
@@ -135,10 +141,4 @@ public:
 std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
 }
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
-    = {"negative_slope"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 11b3ace26..6105fe12c 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -56,6 +56,16 @@ enum class MaxPoolingAttr {
    */
   CeilMode,
 };
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of MaxPooling attributes for debugging and logging.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
+}
+
+namespace Aidge{
 
 /**
  * @class MaxPooling_Op
@@ -188,7 +198,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::MaxPoolingAttr>::data; 
+		return EnumStrings<Aidge::MaxPoolingAttr>::data;
 	}
 };
 
@@ -235,12 +245,5 @@ inline std::shared_ptr<Node> MaxPooling(
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of MaxPooling attributes for debugging and logging.
- */
-template <>
-const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 10bbfce85..59df17ec1 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -120,10 +120,22 @@ enum class MemorizeAttr {
     ForwardStep,    // Tracks the current step in the forward pass.
     EndStep         // The final step for which memory updates will occur.
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of the Memorize operator's attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
+        "schedule_step",
+        "forward_step",
+        "end_step"
+    };
+}
+namespace Aidge {
 /**
  * @class Memorize_Op
- * @brief The Memorize Operator is responsible for storing a tensor's state over a defined 
+ * @brief The Memorize Operator is responsible for storing a tensor's state over a defined
  * number of iterations and providing the stored value as output at each iteration.
  *
  *  Memorize operators are used in models with recurrent structures or feedback loops, such as LSTMs.
@@ -246,7 +258,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::MemorizeAttr>::data; 
+		return EnumStrings<Aidge::MemorizeAttr>::data;
 	}
 };
 
@@ -259,16 +271,5 @@ public:
 std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of the Memorize operator's attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
-    "schedule_step",
-    "forward_step",
-    "end_step"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 417e9664c..de7c3d2b2 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -36,6 +36,20 @@ enum class PadAttr {
     BorderValue      ///< Value to be used for constant padding.
 };
 
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for PadAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::PadAttr>::data[] = {
+        "begin_end_borders",
+        "border_type",
+        "border_value"
+    };
+}  // namespace
+namespace Aidge {
+
 /**
  * @enum PadBorderType
  * @brief Types of border handling available for padding.
@@ -47,7 +59,21 @@ enum class PadBorderType {
     Wrap,     ///< Values wrap around the tensor dimensions.
     Zero      ///< All out-of-bound values are set to 0.
 };
-
+} // namespace Aidge
+namespace {
+/**
+ * @brief EnumStrings specialization for PadBorderType.
+ */
+template <>
+const char* const EnumStrings<Aidge::PadBorderType>::data[] = {
+    "Constant",
+    "Edge",
+    "Reflect",
+    "Wrap",
+    "Zero"
+};
+}  // namespace
+namespace Aidge {
 /**
  * @class Pad_Op
  * @brief Implementation of the Pad operator.
@@ -64,14 +88,14 @@ enum class PadBorderType {
  * The operator supports various border handling techniques (e.g., constant padding, reflection, wrapping).
  *
  * ### Output Tensor Shape:
- * If the input tensor has a shape `[B, C, d1, d2, ..., dN]`, where `B` is the batch size, 
- * `C` is the number of channels, and `[d1, d2, ..., dN]` are the spatial dimensions, 
- * and the padding is defined by `beginEndTuples = {b1, e1, b2, e2, ..., bN, eN}`, 
+ * If the input tensor has a shape `[B, C, d1, d2, ..., dN]`, where `B` is the batch size,
+ * `C` is the number of channels, and `[d1, d2, ..., dN]` are the spatial dimensions,
+ * and the padding is defined by `beginEndTuples = {b1, e1, b2, e2, ..., bN, eN}`,
  * the output tensor shape will be:
- * 
+ *
  * `[B, C, d1 + b1 + e1, d2 + b2 + e2, ..., dN + bN + eN]`.
- * 
- * The padding values `b_i` and `e_i` specify the number of elements to add before and after 
+ *
+ * The padding values `b_i` and `e_i` specify the number of elements to add before and after
  * the corresponding spatial dimension `d_i`. Batch size and channel count remain unchanged.
  *
  * @example Constant Padding:
@@ -92,7 +116,7 @@ enum class PadBorderType {
  *    - Output tensor shape: `[B, C, 4 + 1 + 1, 5 + 2 + 2, 6 + 0 + 0] = [B, C, 6, 9, 6]`
  *    - Padding values mirror the existing tensor values.
  *
- * This operator is commonly used for image processing, extending spatial dimensions while maintaining 
+ * This operator is commonly used for image processing, extending spatial dimensions while maintaining
  * batch and channel consistency, or aligning tensor dimensions in machine learning workflows.
  */
 template <DimIdx_t DIM>
@@ -222,7 +246,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::PadAttr>::data; 
+		return EnumStrings<Aidge::PadAttr>::data;
 	}
 };
 
@@ -258,30 +282,6 @@ inline std::shared_ptr<Node> Pad(
 extern template class Aidge::Pad_Op<1>;
 extern template class Aidge::Pad_Op<2>;
 
-namespace {
-
-/**
- * @brief EnumStrings specialization for PadAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::PadAttr>::data[] = {
-    "begin_end_borders",
-    "border_type",
-    "border_value"
-};
 
-/**
- * @brief EnumStrings specialization for PadBorderType.
- */
-template <>
-const char* const EnumStrings<Aidge::PadBorderType>::data[] = {
-    "Constant",
-    "Edge",
-    "Reflect",
-    "Wrap",
-    "Zero"
-};
-
-}  // namespace
 
 #endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 08d40ba79..3d9b97933 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -95,7 +95,17 @@ public:
 enum class PopAttr {
     ForwardStep     // Tracks the current step in the forward pass
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of the `Pop` operator's attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::PopAttr>::data[] = {
+        "forward_step"
+    };
+}
+namespace Aidge {
 /**
  * @class Pop_Op
  * @brief The `Pop` operator is responsible for removing and outputting elements from a data structure.
@@ -204,7 +214,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::PopAttr>::data; 
+		return EnumStrings<Aidge::PopAttr>::data;
 	}
 };
 
@@ -216,14 +226,5 @@ public:
 std::shared_ptr<Node> Pop(const std::string& name = "");
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of the `Pop` operator's attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::PopAttr>::data[] = {
-    "forward_step"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_POP_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1d6b96582..3690579d3 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -35,25 +35,33 @@ namespace Aidge {
  * @brief Attributes specific to the `Producer_Op` class.
  */
 enum class ProdAttr { Constant };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief Enum string representation for `ProdAttr`.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::ProdAttr>::data[] = {"constant"};
+}
+namespace Aidge {
 /**
  * @class Producer_Op
  * @brief Represents an operator that stores a tensor in memory and provides it as an output.
- * 
- * The `Producer_Op` class is a specialized operator designed to store a tensor in memory 
- * and return it as an output tensor. It is typically used to store parameters or input 
- * values for a computational graph. A `Producer_Op` does not have any input data, parameters, 
- * or attributes, making it a fundamental building block for constant or initialized values 
+ *
+ * The `Producer_Op` class is a specialized operator designed to store a tensor in memory
+ * and return it as an output tensor. It is typically used to store parameters or input
+ * values for a computational graph. A `Producer_Op` does not have any input data, parameters,
+ * or attributes, making it a fundamental building block for constant or initialized values
  * within the graph.
- * 
+ *
  * Key characteristics of a `Producer_Op`:
  * - No inputs: The operator does not accept any input tensors.
  * - No parameters or attributes: It is solely responsible for producing an output tensor.
  * - Stores and returns a tensor: The stored tensor is accessible as the operator's output.
- * 
- * This operator is useful for scenarios where fixed or pre-initialized tensors need to 
+ *
+ * This operator is useful for scenarios where fixed or pre-initialized tensors need to
  * be introduced into a graph, such as weights, biases, or constant values.
- * 
+ *
  * @see OperatorTensor
  * @see Registrable
  */
@@ -77,7 +85,7 @@ public:
 
     /**
      * @brief Constructs a `Producer_Op` object with specific dimensions.
-     * 
+     *
      * @tparam DIM The number of dimensions for the tensor.
      * @param[in] dims Array defining the dimensions of the tensor.
      * @param[in] constant Indicates whether the tensor is constant.
@@ -87,7 +95,7 @@ public:
 
     /**
      * @brief Constructs a `Producer_Op` object from an existing tensor.
-     * 
+     *
      * @param[in] tensor A shared pointer to the tensor to be produced.
      * @param[in] constant Indicates whether the tensor should be constant.
      */
@@ -95,10 +103,10 @@ public:
 
     /**
      * @brief Copy constructor.
-     * 
-     * Copies the attributes and output tensors of the operator. 
+     *
+     * Copies the attributes and output tensors of the operator.
      * Input tensors are not copied, and the new operator will have no associated inputs.
-     * 
+     *
      * @param[in] op The `Producer_Op` object to copy.
      */
     Producer_Op(const Producer_Op& op);
@@ -106,28 +114,28 @@ public:
 public:
     /**
      * @brief Conversion operator to retrieve the output tensor.
-     * 
+     *
      * @return A shared pointer to the output tensor.
      */
     operator std::shared_ptr<Tensor>() const { return mOutputs[0]; }
 
     /**
      * @brief Clones the operator using the copy constructor.
-     * 
+     *
      * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
     /**
      * @brief Retrieves the dimensions of the output tensor.
-     * 
+     *
      * @return A vector containing the dimensions of the output tensor.
      */
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     /**
      * @brief Sets the backend for the operator's execution.
-     * 
+     *
      * @param[in] name The name of the backend.
      * @param[in] device The device index (default is 0).
      */
@@ -135,35 +143,35 @@ public:
 
     /**
      * @brief Retrieves the list of available backends for this operator.
-     * 
+     *
      * @return A set containing the names of available backends.
      */
     std::set<std::string> getAvailableBackends() const override;
 
     /**
      * @brief Retrieves the operator's attributes.
-     * 
+     *
      * @return A shared pointer to the operator's attributes.
      */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     /**
      * @brief Retrieves the constant attribute.
-     * 
+     *
      * @return A reference to the constant attribute.
      */
     inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
 
     /**
      * @brief Performs the forward operation for the operator.
-     * 
+     *
      * Generates the output tensor based on the defined attributes and configuration.
      */
     void forward() override final;
 
     /**
      * @brief Placeholder for the backward operation.
-     * 
+     *
      * This function logs a debug message, as `Producer_Op` typically does not support backpropagation.
      */
     void backward() override final {
@@ -172,12 +180,12 @@ public:
 
     /**
      * @brief Associates an input tensor with the operator.
-     * 
+     *
      * This operation is not supported by `Producer_Op` as it does not take inputs.
-     * 
+     *
      * @param[in] inputIdx The index of the input.
      * @param[in] data A shared pointer to the data to associate.
-     * 
+     *
      * @throws std::runtime_error Always throws, as inputs are not supported.
      */
     void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
@@ -186,35 +194,35 @@ public:
 
     /**
      * @brief Checks whether dimensions are forwarded.
-     * 
+     *
      * @return Always true for `Producer_Op`.
      */
     inline bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
     /**
      * @brief Confirms that dimensions have been forwarded.
-     * 
+     *
      * @return Always true for `Producer_Op`.
      */
     inline bool dimsForwarded() const noexcept override final { return true; }
 
     /**
      * @brief Retrieves the names of the inputs for the operator.
-     * 
+     *
      * @return An empty vector, as `Producer_Op` takes no inputs.
      */
     static const std::vector<std::string> getInputsName() { return {}; }
 
     /**
      * @brief Retrieves the names of the outputs for the operator.
-     * 
+     *
      * @return A vector containing the output name "data_output".
      */
     static const std::vector<std::string> getOutputsName() { return {"data_output"}; }
 
     /**
      * @brief Sets the output tensor for the operator.
-     * 
+     *
      * @param[in] outputIdx Index of the output to set.
      * @param[in] data A shared pointer to the data.
      */
@@ -223,12 +231,12 @@ public:
 
 /**
  * @brief Helper function to create a producer node with specified dimensions.
- * 
+ *
  * @tparam DIM The number of dimensions.
  * @param[in] dims Array defining the dimensions of the tensor.
  * @param[in] name Optional name for the node.
  * @param[in] constant Indicates whether the tensor should be constant.
- * 
+ *
  * @return A shared pointer to the created node.
  */
 template <std::size_t DIM>
@@ -236,11 +244,11 @@ std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM>& dims, const std
 
 /**
  * @brief Helper function with a C-style array for dimension deduction.
- * 
+ *
  * @param[in] dims C-style array defining the tensor dimensions.
  * @param[in] name Optional name for the node.
  * @param[in] constant Indicates whether the tensor should be constant.
- * 
+ *
  * @return A shared pointer to the created node.
  */
 template <std::size_t DIM>
@@ -257,12 +265,12 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode,
 
 /**
  * @brief Adds a producer node to another node with a C-style array.
- * 
+ *
  * @param[in] otherNode The node to associate with the producer.
  * @param[in] inputIdx The input index.
  * @param[in] dims C-style array defining the tensor dimensions.
  * @param[in] extension An extension string for the producer.
- * 
+ *
  * @return A shared pointer to the updated node.
  */
 template <std::size_t DIM>
@@ -272,12 +280,4 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOInde
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Enum string representation for `ProdAttr`.
- */
-template <>
-const char* const EnumStrings<Aidge::ProdAttr>::data[] = {"constant"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index c6d875719..3ee4a1bec 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -51,7 +51,16 @@ enum class ReduceMeanAttr {
    */
   NoopWithEmptyAxes
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {
+        "axes",
+        "keep_dims",
+        "noop_with_empty_axes"
+    };
+}
+namespace Aidge {
 /**
  * @class ReduceMean_Op
  * @brief Implements the ReduceMean operation to compute the mean of a tensor along specified axes.
@@ -170,7 +179,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ReduceMeanAttr>::data; 
+		return EnumStrings<Aidge::ReduceMeanAttr>::data;
 	}
 
     virtual ~ReduceMean_Op() noexcept;
@@ -194,13 +203,5 @@ std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {
-    "axes",
-    "keep_dims",
-    "noop_with_empty_axes"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 72f6bf9b2..adb58f895 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -52,6 +52,12 @@ enum class ReduceSumAttr {
   NoopWithEmptyAxes
 };
 
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+}
+namespace Aidge {
 /**
  * @class ReduceSum_Op
  * @brief Implements the ReduceSum operation to compute the sum of a tensor along specified axes.
@@ -100,7 +106,7 @@ public:
     /**
      * @brief constructor for ReduceSum op
      * @param[in] axes around which perform the operation
-     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and 
+     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and
      * if false we remove the dimension completely
      * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing
      * and if false, we reduce on all axes
@@ -176,7 +182,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ReduceSumAttr>::data; 
+		return EnumStrings<Aidge::ReduceSumAttr>::data;
 	}
 };
 
@@ -202,9 +208,4 @@ inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
 }
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 51623737e..e69c42d4d 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -53,21 +53,29 @@ enum class ReshapeAttr {
      * @brief The target shape for the output tensor.
      */
     Shape,
-    
+
     /**
      * @brief Whether zeros in the shape attribute are allowed.
-     * 
+     *
      * When true, zeros in the target shape retain the corresponding dimension size from the input tensor.
      */
     AllowZero
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for ReshapeAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = {"shape", "allow_zero"};
+}
+namespace Aidge {
 /**
  * @brief Description of Reshape operator that adjusts the shape of the input tensor.
  *
- * This operator reshapes the input tensor according to the specified target shape. 
- * If the target shape is not compatible with the input tensor's total number of elements, 
- * the operation will fail. If the `AllowZero` attribute is true, zeros in the target shape 
+ * This operator reshapes the input tensor according to the specified target shape.
+ * If the target shape is not compatible with the input tensor's total number of elements,
+ * the operation will fail. If the `AllowZero` attribute is true, zeros in the target shape
  * retain the corresponding dimensions from the input tensor.
  *
  * @example Input: Tensor of dimensions `[2, 3]` with `Shape = {3, 2}` results in a tensor with dimensions `[3, 2]`.
@@ -182,7 +190,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ReshapeAttr>::data; 
+		return EnumStrings<Aidge::ReshapeAttr>::data;
 	}
 };
 
@@ -200,12 +208,5 @@ std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for ReshapeAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = {"shape", "allow_zero"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 3a4ef3771..37d42fcc8 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -39,7 +39,17 @@ enum class ResizeAttr {
     InterpolationMode,
     PaddingMode
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
+        "coordinate_transformation_mode",
+        "cubic_coeff_a",
+        "interpolation_mode",
+        "padding_mode"
+    };
+}
+namespace Aidge {
 /**
  * @brief Resize operator, will up/downscale a given tensor given the input.
  * @verbatim
@@ -197,7 +207,7 @@ class Resize_Op
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ResizeAttr>::data; 
+		return EnumStrings<Aidge::ResizeAttr>::data;
 	}
 };
 
@@ -230,13 +240,4 @@ Resize(std::vector<float> scale = std::vector<float>(),
 
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
-    "coordinate_transformation_mode",
-    "cubic_coeff_a",
-    "interpolation_mode",
-    "padding_mode"
-};
-}
 #endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index c1f4514c9..fb342d345 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -23,7 +23,7 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-// Caution: This operator is now deprecated and should no longer be used. 
+// Caution: This operator is now deprecated and should no longer be used.
 // It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
 
 namespace Aidge {
@@ -38,7 +38,7 @@ enum class ScalingAttr {
     /**
      * @brief Number of quantization bits.
      *
-     * Specifies the bit-width used for quantization. 
+     * Specifies the bit-width used for quantization.
      * For example, a value of `8` represents 8-bit quantization.
      */
     QuantizedNbBits,
@@ -51,12 +51,18 @@ enum class ScalingAttr {
      */
     IsOutputUnsigned
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::ScalingAttr>::data[]
+        = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
+}
+namespace Aidge {
 /**
  * @brief Description of a scaling operation to scale and quantize input tensors.
  *
- * The `Scaling_Op` class applies a scaling factor to the input tensor, quantizes 
- * the scaled values to a specified bit-width, and outputs either signed or unsigned integers 
+ * The `Scaling_Op` class applies a scaling factor to the input tensor, quantizes
+ * the scaled values to a specified bit-width, and outputs either signed or unsigned integers
  * based on the configuration.
  *
  * The input and output Tensors have the same dimensions.
@@ -94,7 +100,7 @@ public:
     /**
      * @brief Copy-constructor.
      * @param[in] op Scaling_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
      * The new operator has no associated input.
      */
     Scaling_Op(const Scaling_Op& op);
@@ -140,7 +146,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ScalingAttr>::data; 
+		return EnumStrings<Aidge::ScalingAttr>::data;
 	}
 };
 
@@ -159,10 +165,5 @@ std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
                                      const std::string& name = "");
 } // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-    = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 84d497abf..2a553fb82 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -62,7 +62,15 @@ enum class ShapeAttr {
      */
     End
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for ShapeAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
+}
+namespace Aidge {
 /**
  * @brief Description of the operation of extracting the shape of a tensor.
  *
@@ -169,7 +177,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ShapeAttr>::data; 
+		return EnumStrings<Aidge::ShapeAttr>::data;
 	}
 };
 
@@ -185,12 +193,6 @@ std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for ShapeAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index ea4d21e9a..fa21b3d19 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -84,7 +84,12 @@ enum class SliceAttr {
      */
     Steps
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
+}
+namespace Aidge{
 /**
  * @class Slice_Op
  * @brief Implements the Slice operation for extracting sub-tensors.
@@ -209,7 +214,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::SliceAttr>::data; 
+		return EnumStrings<Aidge::SliceAttr>::data;
 	}
 };
 
@@ -231,9 +236,4 @@ std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index a7d8283a0..86e1a57e7 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -33,7 +33,15 @@ enum class SoftmaxAttr {
      */
     Axis
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for SoftmaxAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
+}
+namespace Aidge {
 /**
  * @brief Description of a Softmax operation on input Tensor along a specified axis.
  *
@@ -136,7 +144,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::SoftmaxAttr>::data; 
+		return EnumStrings<Aidge::SoftmaxAttr>::data;
 	}
 };
 
@@ -151,12 +159,4 @@ std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for SoftmaxAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 9f2beb3aa..8b6acb060 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -65,7 +65,17 @@ enum class SplitAttr {
      */
     Split
 };
+} // namespace Aidge
 
+namespace {
+    /**
+     * @brief EnumStrings specialization for SplitAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::SplitAttr>::data[] = {"axis", "split"};
+    }
+
+namespace Aidge {
 /**
  * @class Split_Op
  * @brief Implements the Split operation to divide a tensor into multiple sub-tensors along a specified axis.
@@ -179,7 +189,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::SplitAttr>::data; 
+		return EnumStrings<Aidge::SplitAttr>::data;
 	}
 };
 
@@ -199,12 +209,5 @@ std::shared_ptr<Node> Split(DimSize_t nbOutput,
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for SplitAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::SplitAttr>::data[] = {"axis", "split"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 9a2cc8f54..69fa9d493 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -48,7 +48,12 @@ enum class SqueezeAttr {
    */
   Axes
 };
-
+} // namespace Aidge
+namespace {
+  template <>
+  const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"axes"};
+}
+namespace Aidge {
 /**
  * @brief This operator has as purpose to remove dummy dimensions around given
  * axes.
@@ -148,7 +153,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::SqueezeAttr>::data; 
+		return EnumStrings<Aidge::SqueezeAttr>::data;
 	}
 };
 
@@ -160,9 +165,4 @@ inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"axes"};
-}
-
 #endif // AIDGE_CORE_OPERATOR_SQUEEZE_H_
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 0e420789d..214428447 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -95,7 +95,15 @@ enum class StackAttr {
     ForwardStep,   // Tracks the current step in the forward pass.
     MaxElements    // Maximum number of elements that can be stacked.
 };
-
+}  // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of the Stack operator's attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
+}
+namespace Aidge {
 /**
  * @class StackOp
  * @brief The `Stack` operator performs a stacking operation over a sequence of input tensors.
@@ -218,7 +226,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::StackAttr>::data; 
+		return EnumStrings<Aidge::StackAttr>::data;
 	}
 };
 
@@ -231,12 +239,5 @@ public:
 std::shared_ptr<Node> Stack(std::uint32_t maxElements = 0, const std::string& name = "");
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of the Stack operator's attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_STACK_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index d760ccd0d..2619c5ea5 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -54,13 +54,21 @@ public:
 enum class TransposeAttr {
     /**
      * @brief Order of the output dimensions relative to the input dimensions.
-     * 
+     *
      * If this attribute is empty, the dimensions of the input tensor will
      * be reversed.
      */
     OutputDimsOrder
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for TransposeAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
+    }
+namespace Aidge {
 /**
  * @brief Describes the operation of transposing the axes of a given tensor.
  *
@@ -172,7 +180,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::TransposeAttr>::data; 
+		return EnumStrings<Aidge::TransposeAttr>::data;
 	}
 };
 
@@ -188,12 +196,5 @@ std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder =
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for TransposeAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index bea32c6cc..d220807d6 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -71,13 +71,25 @@ enum class UnfoldAttr {
      */
     KernelDims
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for UnfoldAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::UnfoldAttr>::data[] = {
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @brief Describes the operation of unfolding a tensor into sliding blocks.
- * 
+ *
  * The Unfold operator extracts sliding blocks from the input tensor along
  * specified dimensions, controlled by stride, dilation, and kernel size.
- * 
+ *
  * @tparam DIM Number of dimensions involved in the operation.
  *
  * @example Input: Tensor of dimensions `[1, 3, 32, 32]`, with `KernelDims = {3, 3}`,
@@ -205,7 +217,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::UnfoldAttr>::data; 
+		return EnumStrings<Aidge::UnfoldAttr>::data;
 	}
 };
 
@@ -237,16 +249,5 @@ inline std::shared_ptr<Node> Unfold( DimSize_t const (&kernelDims)[DIM],
 
 extern template class Aidge::Unfold_Op<2>;
 
-namespace {
-/**
- * @brief EnumStrings specialization for UnfoldAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::UnfoldAttr>::data[] = {
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_UNFOLD_H_ */
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 8c5909182..a78a98672 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -47,7 +47,12 @@ enum class UnsqueezeAttr {
    */
   Axes
 };
-
+} // namespace Aidge
+namespace {
+  template <>
+  const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"axes"};
+}
+namespace Aidge {
 /**
  * @brief This operator has as purpose to add a dummy dimension around given
  * axis. Unsqueezing the 2nd dim of a tensor of dim (1,2,3,4) will result in a
@@ -146,7 +151,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::UnsqueezeAttr>::data; 
+		return EnumStrings<Aidge::UnsqueezeAttr>::data;
 	}
 };
 
@@ -158,9 +163,4 @@ inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"axes"};
-}
-
 #endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
diff --git a/include/aidge/operator/WeightInterleaving.hpp b/include/aidge/operator/WeightInterleaving.hpp
index 315bb3e2d..a8f8c3d74 100644
--- a/include/aidge/operator/WeightInterleaving.hpp
+++ b/include/aidge/operator/WeightInterleaving.hpp
@@ -30,10 +30,10 @@ namespace Aidge {
  * @brief WeightInterleaving operator Compresses the last dimension of a tensor by packing low-bitwidth values
  * (e.g., 2, 3, or 4 bits) into fewer bytes.
  *
- * The operator reduces the size of the last dimension based on the bitwidth (`nb_bits`), 
- * packing multiple values into each byte. For example, 4-bit values result in a halved last dimension, 
+ * The operator reduces the size of the last dimension based on the bitwidth (`nb_bits`),
+ * packing multiple values into each byte. For example, 4-bit values result in a halved last dimension,
  * while 2-bit values reduce it by a factor of 4.
- * 
+ *
  * The output tensor has the same shape as the input, except for the compressed last dimension.
  *
  * @see OperatorTensor
@@ -78,10 +78,10 @@ public:
 
     /**
      * @brief Calculates the required size for the 8-bits`compactData` vector.
-     * 
+     *
      * This function determines the minimum number of bytes needed in `compactData`
      * to store `dataSize` elements compacted to `nb_bits` bits each.
-     * 
+     *
      * @param dataSize The total number of elements in the input data array.
      * @param nb_bits The number of bits to use for each compacted element (from 1 to 7).
      * @return std::size_t The required size in bytes for `compactData`.
-- 
GitLab


From 897f3cb8e868c867aad9f7e3d3b3c561cedc74f7 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 12:53:17 +0000
Subject: [PATCH 20/31] [Fix] Add default arg axis=0 for concat

---
 include/aidge/operator/Concat.hpp         |  4 ++--
 python_binding/operator/pybind_Concat.cpp | 24 +++++++++++------------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 83914b673..ad31ef1a3 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -56,7 +56,7 @@ enum class ConcatAttr {
      *
      * The specified axis determines the direction of concatenating.
      */
-    Axis 
+    Axis
 };
 
 /**
@@ -107,7 +107,7 @@ public:
      * @param[in] nbIn Number of input tensors.
      * @param[in] axis Axis along which concatenation is performed.
      */
-    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis);
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis = 0);
 
     /**
      * @brief Copy-constructor. Copies the operator attributes and its output tensors,
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 9e1b3de9e..d2410b03a 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -24,30 +24,30 @@ void init_Concat(py::module& m) {
         R"mydelimiter(
         Initialize a Concat operator.
 
-        :param nb_inputs : The number of input tensors to concatenate.
-        :type nb_inputs : :py:class:`int`
-        :param axis : The axis along which to concatenate the tensors.
-        :type axis : :py:class:`int`
+        :param nb_inputs: The number of input tensors to concatenate.
+        :type nb_inputs: :py:class:`int`
+        :param axis: The axis along which to concatenate the tensors, default=0.
+        :type axis: :py:class:`int`
         )mydelimiter")
         .def(py::init<const IOIndex_t, const int>(),
              py::arg("nb_inputs"),
-             py::arg("axis"))
+             py::arg("axis") = 0)
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
         .def_static("get_outputs_name", &Concat_Op::getOutputsName)
         .def_readonly_static("Type", &Concat_Op::Type);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
-    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis"), py::arg("name") = "",
+    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis") = 0, py::arg("name") = "",
         R"mydelimiter(
         Initialize a node containing a Concat operator.
 
-        :param nb_inputs : The number of input tensors to concatenate.
-        :type nb_inputs : :py:class:`int`
-        :param axis : The axis along which to concatenate the tensors.
-        :type axis : :py:class:`int`
-        :param name : Name of the node.
-        :type name : :py:class:`str`
+        :param nb_inputs: The number of input tensors to concatenate.
+        :type nb_inputs: :py:class:`int`
+        :param axis: The axis along which to concatenate the tensors.
+        :type axis: :py:class:`int`
+        :param name: Name of the node.
+        :type name: :py:class:`str`
         )mydelimiter");
 }
 
-- 
GitLab


From 89533f701acd207b823b7a6bb4700fcc38719162 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 12:55:07 +0000
Subject: [PATCH 21/31] [Fix] Make Unsqueeze registrable

---
 python_binding/operator/pybind_Unsqueeze.cpp | 29 ++++++++++----------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index b61cb40ce..7ef8af8b6 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -23,26 +23,25 @@ void init_Unsqueeze(py::module &m) {
   py::class_<Unsqueeze_Op, std::shared_ptr<Unsqueeze_Op>, OperatorTensor>(
       m, "UnsqueezeOp", py::multiple_inheritance(),
       R"mydelimiter(
-		Initialize an unsqueeze operator.
-		:param axes :   axes to unsqueeze between [-r;r-1] 
-						with r = input_tensor.nbDims() + len(axes)
-		:type axes : :py:class: List[Int]
+            Initialize an unsqueeze operator.
+            :param axes:   axes to unsqueeze between [-r;r-1] with r = input_tensor.nbDims() + len(axes)
+            :type axes: :py:class: List[Int]
 		)mydelimiter")
       // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
       .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
-      .def("axes", &Unsqueeze_Op::axes);
-  // Here we bind the constructor of the Unsqueeze Node. We add an argument for
-  // each attribute of the operator (in here we only have 'axes') and the last
-  // argument is the node's name.
+      .def_readonly_static("Type", &Unsqueeze_Op::Type)
+      ;
+
+  declare_registrable<Unsqueeze_Op>(m, "UnsqueezeOp");
+
   m.def("Unsqueeze", &Unsqueeze, py::arg("axes") = std::vector<int8_t>({}),
         py::arg("name") = "",
         R"mydelimiter(
-    Initialize a node containing an unsqueeze operator.
-	:param axes :   axes to unsqueeze between [-r;r-1] 
-					with r = input_tensor.nbDims() + len(axes)
-	:type axes : :py:class: List[Int]
-    :param name : name of the node.
-)mydelimiter");
-}
+            Initialize a node containing an unsqueeze operator.
+            :param axes:   axes to unsqueeze between [-r;r-1] with r = input_tensor.nbDims() + len(axes)
+            :type axes: :py:class: List[Int]
+            :param name: name of the node.
+        )mydelimiter");
+    }
 } // namespace Aidge
-- 
GitLab


From 0182775fd06a414b04892fec8e2a3c7479bb2382 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 13:00:44 +0000
Subject: [PATCH 22/31] [Fix] Make Squeeze registrable + fix python doc.

---
 python_binding/operator/pybind_Squeeze.cpp | 44 +++++++++++-----------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index ca90fb46a..188ce745d 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -24,29 +24,29 @@ namespace Aidge {
 
 void init_Squeeze(py::module &m) {
   py::class_<Squeeze_Op, std::shared_ptr<Squeeze_Op>, OperatorTensor>(
-      m, "SqueezeOp", py::multiple_inheritance(),
-		R"mydelimiter(
-		Initialize squeeze operator
-		:param axes :   axes to squeeze between [-r;r-1] 
-						with r = input_tensor.nbDims()
-						& r in [-128 , 127]
-		:type axes : :py:class: List[Int]
-		)mydelimiter")
-      .def("get_inputs_name", &Squeeze_Op::getInputsName)
-      .def("get_outputs_name", &Squeeze_Op::getOutputsName)
-      .def("axes", &Squeeze_Op::axes);
-  // Here we bind the constructor of the Squeeze Node. We add an argument
-  // for each attribute of the operator (in here we only have 'axes') and
-  // the last argument is the node's name.
-  m.def("Squeeze", &Squeeze, py::arg("axes") = std::vector<int8_t>({}),
+    m, "SqueezeOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize squeeze operator
+    :param axes:   axes to squeeze between [-r;r-1]
+    				with r = input_tensor.nbDims()
+    				& r in [-128 , 127]
+    :type axes: :py:class: List[Int]
+    )mydelimiter")
+    .def("get_inputs_name", &Squeeze_Op::getInputsName)
+    .def("get_outputs_name", &Squeeze_Op::getOutputsName)
+    .def("axes", &Squeeze_Op::axes);
+
+    declare_registrable<Squeeze_Op>(m, "SqueezeOp");
+    m.def("Squeeze", &Squeeze, py::arg("axes") = std::vector<int8_t>({}),
         py::arg("name") = "",
         R"mydelimiter(
-    Initialize a node containing a squeeze operator.
-	:param axes :   axes to squeeze between [-r;r-1] 
-					with r = input_tensor.nbDims()
-					& r in [-128 , 127]
-	:type axes : :py:class: List[Int]
-    :param name : name of the node.
-)mydelimiter");
+            Initialize a node containing a squeeze operator.
+            :param axes:   axes to squeeze between [-r;r-1]
+                            with r = input_tensor.nbDims()
+                            & r in [-128 , 127]
+            :type axes: :py:class: List[Int]
+            :param name: name of the node.
+            :type name: str
+        )mydelimiter");
 }
 } // namespace Aidge
-- 
GitLab


From b8fdfffd7fb714ad61f99db51e94c59a3557f98e Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 13:05:00 +0000
Subject: [PATCH 23/31] Switch multiple attribute names to follow the
 snake_case convention.

---
 include/aidge/operator/BitShift.hpp  | 6 +++---
 include/aidge/operator/Resize.hpp    | 8 ++++----
 include/aidge/operator/Squeeze.hpp   | 2 +-
 include/aidge/operator/Unsqueeze.hpp | 2 +-
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index 711cf8585..9368e3461 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 
 enum class BitShiftAttr {
     /**
-     * 
+     *
      */
     BitShiftdirection
 };
@@ -41,7 +41,7 @@ enum class BitShiftAttr {
  * - **InputTensor**: The tensor whose elements will be shifted.
  * - **ShiftAmount**: The tensor specifying the shift amount for each element.
  *
- * The shift is applied in the direction specified by the attribute `BitShiftdirection`, 
+ * The shift is applied in the direction specified by the attribute `BitShiftdirection`,
  * which can either be `left` or `right`.
  *
  * @see OperatorTensor
@@ -166,7 +166,7 @@ namespace {
  * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
  */
 template <>
-const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = { "BitShiftdirection" };
+const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = { "bit_shift_direction" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index c3c7838ef..89224f927 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -225,10 +225,10 @@ Resize(std::vector<float> scale = std::vector<float>(),
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
-    "coordinateTransformationMode",
-    "cubicCoeffA",
-    "InterpolationMode",
-    "PaddingMode"
+    "coordinate_transformation_mode",
+    "cubic_coeff_a",
+    "interpolation_mode",
+    "padding_mode"
 };
 }
 #endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 5c966edaf..e3c1f4de1 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -154,7 +154,7 @@ inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"Axes"};
+const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"axes"};
 }
 
 #endif // AIDGE_CORE_OPERATOR_SQUEEZE_H_
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index c07105405..c25800acb 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -152,7 +152,7 @@ inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"Axes"};
+const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"axes"};
 }
 
 #endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
-- 
GitLab


From b2d46b46e42486d5c1d29bc158a0f4a17aebe2ed Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 14 Feb 2025 15:28:15 +0000
Subject: [PATCH 24/31] Update def to def_static for static functions.

---
 python_binding/operator/pybind_AvgPooling.cpp      | 10 +++++-----
 python_binding/operator/pybind_ConstantOfShape.cpp |  8 ++++----
 python_binding/operator/pybind_Squeeze.cpp         |  4 ++--
 python_binding/operator/pybind_Unsqueeze.cpp       |  4 ++--
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index e376bcffb..f93df9e2c 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -31,17 +31,17 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
   const std::string pyClassName("AvgPooling" + std::to_string(DIM) + "DOp");
   const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName);
-  
+
   py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor>(
         m, pyClassName.c_str(),
         py::multiple_inheritance(),
         R"mydelimiter(
         Initialize an AvgPooling operator for a tensor.
 
-        This operator performs average pooling on the input tensor using the specified kernel dimensions 
+        This operator performs average pooling on the input tensor using the specified kernel dimensions
         and stride dimensions.
 
-        :param kernel_dims: The size of the kernel (filter) applied during pooling. 
+        :param kernel_dims: The size of the kernel (filter) applied during pooling.
                              Specifies the dimensions of the kernel (e.g., [3, 3] for 2D pooling).
         :type kernel_dims: List[int]
         :param stride_dims: The stride of the pooling operation. Specifies how much the kernel moves in each step.
@@ -60,8 +60,8 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
             py::arg("stride_dims") = create_array<DimSize_t, DIM>(1),
             py::arg("dilations") = create_array<DimSize_t, DIM>(1),
             py::arg("ceil_mode") = false)
-    .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+    .def_static("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
+    .def_static("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
     .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index 07079d983..5a0e858f1 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -27,20 +27,20 @@ void init_ConstantOfShape(py::module &m) {
       R"mydelimiter(
       Initialize a ConstantOfShape operator.
 
-      :param value : Tensor with a given datatype that contains the value 
+      :param value : Tensor with a given datatype that contains the value
                      that will fill the output tensor.
       :type value : :py:class:`Tensor`
       )mydelimiter")
       .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
-      .def("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
-      .def("value", &ConstantOfShape_Op::value);
+      .def_static("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
+      .def_static("value", &ConstantOfShape_Op::value);
 
   m.def("ConstantOfShape", &ConstantOfShape, py::arg("value") = Tensor(0.f),
         py::arg("name") = "",
         R"mydelimiter(
         Initialize a node containing a ConstantOfShape operator.
 
-        :param value : Tensor with a given datatype that contains the value 
+        :param value : Tensor with a given datatype that contains the value
                        that will fill the output tensor.
         :type value : :py:class:`Tensor`
         :param name  : Name of the node.
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index 188ce745d..f7ee4d722 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -32,8 +32,8 @@ void init_Squeeze(py::module &m) {
     				& r in [-128 , 127]
     :type axes: :py:class: List[Int]
     )mydelimiter")
-    .def("get_inputs_name", &Squeeze_Op::getInputsName)
-    .def("get_outputs_name", &Squeeze_Op::getOutputsName)
+    .def_static("get_inputs_name", &Squeeze_Op::getInputsName)
+    .def_static("get_outputs_name", &Squeeze_Op::getOutputsName)
     .def("axes", &Squeeze_Op::axes);
 
     declare_registrable<Squeeze_Op>(m, "SqueezeOp");
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index 7ef8af8b6..c21a7bcfa 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -28,8 +28,8 @@ void init_Unsqueeze(py::module &m) {
             :type axes: :py:class: List[Int]
 		)mydelimiter")
       // Here we bind the methods of the Unsqueeze_Op that will want to access
-      .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
-      .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+      .def_static("get_inputs_name", &Unsqueeze_Op::getInputsName)
+      .def_static("get_outputs_name", &Unsqueeze_Op::getOutputsName)
       .def_readonly_static("Type", &Unsqueeze_Op::Type)
       ;
 
-- 
GitLab


From f9384ccf4ec25611f240a1b1b8e837fea8535029 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Tue, 18 Feb 2025 08:39:04 +0000
Subject: [PATCH 25/31] Add attributesName function in C++ and Python API.

---
 include/aidge/operator/ArgMax.hpp             |   8 ++
 include/aidge/operator/AvgPooling.hpp         |   8 ++
 include/aidge/operator/BatchNorm.hpp          |   8 ++
 include/aidge/operator/BitShift.hpp           |  10 +-
 include/aidge/operator/Cast.hpp               |   8 ++
 include/aidge/operator/Clip.hpp               |   8 ++
 include/aidge/operator/Concat.hpp             |   8 ++
 include/aidge/operator/ConstantOfShape.hpp    |  12 +-
 include/aidge/operator/Conv.hpp               |   8 ++
 include/aidge/operator/ConvDepthWise.hpp      |   8 ++
 include/aidge/operator/DepthToSpace.hpp       |   8 ++
 include/aidge/operator/Flatten.hpp            |   8 ++
 include/aidge/operator/Fold.hpp               |   8 ++
 include/aidge/operator/Gather.hpp             |   8 ++
 include/aidge/operator/GridSample.hpp         |   8 ++
 include/aidge/operator/Heaviside.hpp          |   8 ++
 include/aidge/operator/LRN.hpp                |  10 +-
 include/aidge/operator/LeakyReLU.hpp          |   8 ++
 include/aidge/operator/MaxPooling.hpp         |   8 ++
 include/aidge/operator/Memorize.hpp           |   8 ++
 include/aidge/operator/Pad.hpp                |   8 ++
 include/aidge/operator/Pop.hpp                |   8 ++
 include/aidge/operator/ReduceMean.hpp         |   8 ++
 include/aidge/operator/ReduceSum.hpp          |   8 ++
 include/aidge/operator/Reshape.hpp            |   8 ++
 include/aidge/operator/Resize.hpp             |   8 ++
 include/aidge/operator/Scaling.hpp            |   8 ++
 include/aidge/operator/Shape.hpp              |   8 ++
 include/aidge/operator/Slice.hpp              |   8 ++
 include/aidge/operator/Softmax.hpp            |   8 ++
 include/aidge/operator/Split.hpp              |   8 ++
 include/aidge/operator/Squeeze.hpp            |   8 ++
 include/aidge/operator/Stack.hpp              |   8 ++
 include/aidge/operator/Transpose.hpp          |   8 ++
 include/aidge/operator/Unfold.hpp             |   8 ++
 include/aidge/operator/Unsqueeze.hpp          |   8 ++
 python_binding/operator/pybind_ArgMax.cpp     |   8 ++
 python_binding/operator/pybind_AvgPooling.cpp |   9 ++
 python_binding/operator/pybind_BatchNorm.cpp  |   9 ++
 python_binding/operator/pybind_BitShift.cpp   |  10 +-
 python_binding/operator/pybind_Cast.cpp       |  10 +-
 python_binding/operator/pybind_Clip.cpp       | 127 ++++++++++--------
 python_binding/operator/pybind_Concat.cpp     |   9 ++
 .../operator/pybind_ConstantOfShape.cpp       |  12 +-
 python_binding/operator/pybind_Conv.cpp       |   9 ++
 .../operator/pybind_ConvDepthWise.cpp         |   9 ++
 .../operator/pybind_DepthToSpace.cpp          |   9 ++
 python_binding/operator/pybind_Gather.cpp     |   9 ++
 python_binding/operator/pybind_GridSample.cpp |   9 ++
 python_binding/operator/pybind_Heaviside.cpp  |   9 ++
 python_binding/operator/pybind_LRN.cpp        |   9 ++
 python_binding/operator/pybind_LeakyReLU.cpp  |   9 ++
 python_binding/operator/pybind_MaxPooling.cpp |   9 ++
 python_binding/operator/pybind_Memorize.cpp   |  10 +-
 python_binding/operator/pybind_Pad.cpp        |   8 ++
 python_binding/operator/pybind_Pop.cpp        |   9 ++
 python_binding/operator/pybind_ReduceMean.cpp |   8 ++
 python_binding/operator/pybind_ReduceSum.cpp  |   9 ++
 python_binding/operator/pybind_Reshape.cpp    |   9 ++
 python_binding/operator/pybind_Resize.cpp     |  16 ++-
 python_binding/operator/pybind_Scaling.cpp    |   9 ++
 python_binding/operator/pybind_Shape.cpp      |   9 ++
 python_binding/operator/pybind_Slice.cpp      |   9 ++
 python_binding/operator/pybind_Softmax.cpp    |   9 ++
 python_binding/operator/pybind_Split.cpp      |   9 ++
 python_binding/operator/pybind_Squeeze.cpp    |   9 ++
 python_binding/operator/pybind_Stack.cpp      |   9 ++
 python_binding/operator/pybind_Transpose.cpp  |   8 ++
 python_binding/operator/pybind_Unsqueeze.cpp  |   8 ++
 69 files changed, 647 insertions(+), 72 deletions(-)

diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 7358899a9..6d24d87bd 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -177,6 +177,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ArgMaxAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index ab9e111f2..7e02a94ab 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -223,6 +223,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::AvgPoolingAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index ddffaeb02..995179d7f 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -152,6 +152,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::BatchNormAttr>::data; 
+	}
 };
 
 extern template class Aidge::BatchNorm_Op<2>;
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index 9368e3461..d066507dd 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -147,6 +147,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "OutputTensor" };
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::BitShiftAttr>::data;
+	}
 };
 
 /**
@@ -166,7 +174,7 @@ namespace {
  * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
  */
 template <>
-const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = { "bit_shift_direction" };
+const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {"bit_shift_direction"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 1f934fbc7..12c3a280a 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -137,6 +137,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::CastAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 0825b85bb..93c042d86 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -148,6 +148,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "data_output" };
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ClipAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index ad31ef1a3..7a4ea74a4 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -169,6 +169,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "data_output" };
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConcatAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index 18e626544..d837d108a 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -63,7 +63,7 @@ private:
 public:
   /**
    * @brief constructor for ConstantOfShape_op
-   * @param[in] value : a scalar tensor which holds the value that will 
+   * @param[in] value : a scalar tensor which holds the value that will
    * fill the output tensor
    */
   ConstantOfShape_Op(const Tensor &value = Tensor(0.f))
@@ -116,6 +116,14 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"constant_of_shape"};
   }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConstantOfShapeAttr>::data;
+	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
@@ -129,7 +137,7 @@ inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"Value"};
+const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
 }
 
 #endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 8984ebd08..7beea057e 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -209,6 +209,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConvAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 03e821041..3090b9feb 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -189,6 +189,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConvDepthWiseAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 769dad767..cc51ea180 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -164,6 +164,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::DepthToSpaceAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index a7f5c6435..10ce58ad0 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -155,6 +155,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::FlattenAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 3b5b9449d..9d2d4e0df 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -210,6 +210,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::FoldAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index dc3e1a814..3842a041e 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -184,6 +184,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::GatherAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 999f7bba1..28c5fb5e5 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -170,6 +170,14 @@ public:
 	static const std::vector<std::string> getOutputsName() {
 		return {"data_output"};
 	}
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::GridSampleAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index 94eaa400a..874853c4e 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -110,6 +110,14 @@ public:
         return {"output"};
     }
 
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::HeavisideAttr>::data; 
+	}
+
     /**
      * @brief Get the attributes of the operator.
      */
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index 369da5f97..9019c089b 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -158,6 +158,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::LRNAttr>::data; 
+	}
 };
 
 /**
@@ -176,7 +184,7 @@ namespace {
  * @brief EnumStrings specialization for LRNAttr.
  */
 template <>
-const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size"};
+const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size", nullptr};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 46730d026..5381b3cb1 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -115,6 +115,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::LeakyReLUAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 9063fb88b..f4f38de4a 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -198,6 +198,14 @@ public:
      * @return A vector of output tensors names.
      */
     static const std::vector<std::string> getOutputsName(){ return {"data_output"}; }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::MaxPoolingAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index deefc0077..10bbfce85 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -240,6 +240,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output", "data_output_rec"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::MemorizeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index c1ed3500c..417e9664c 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -216,6 +216,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::PadAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 2cf567329..630c58c0d 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -211,6 +211,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::PopAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 6aded3638..c6d875719 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -165,6 +165,14 @@ public:
         return {"data_output"};
     }
 
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ReduceMeanAttr>::data; 
+	}
+
     virtual ~ReduceMean_Op() noexcept;
 };
 
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 5a3674b21..72f6bf9b2 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -170,6 +170,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ReduceSumAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index c170ad79e..51623737e 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -176,6 +176,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ReshapeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 89224f927..3a4ef3771 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -191,6 +191,14 @@ class Resize_Op
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ResizeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index b33fb5841..c1f4514c9 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -134,6 +134,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ScalingAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 609e354d5..84d497abf 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -163,6 +163,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ShapeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index d32bc4fe2..ea4d21e9a 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -203,6 +203,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SliceAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 290132690..a7d8283a0 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -130,6 +130,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SoftmaxAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 3c6b52d3c..9f2beb3aa 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -173,6 +173,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output_0", "data_output_n"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SplitAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index e3c1f4de1..9a2cc8f54 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -142,6 +142,14 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"squeezed"};
   }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SqueezeAttr>::data; 
+	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 71e4e780a..0e420789d 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -212,6 +212,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::StackAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index ab3b18e51..d760ccd0d 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -166,6 +166,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::TransposeAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 333413b1d..bea32c6cc 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -199,6 +199,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::UnfoldAttr>::data; 
+	}
 };
 
 /**
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index c25800acb..8c5909182 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -140,6 +140,14 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"unsqueezed"};
   }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the static array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::UnsqueezeAttr>::data; 
+	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
index 3de54afd7..75f325749 100644
--- a/python_binding/operator/pybind_ArgMax.cpp
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -43,6 +43,14 @@ void init_ArgMax(py::module &m) {
     .def(py::init<std::int32_t, bool, bool>(), py::arg("axis"), py::arg("keep_dims"), py::arg("select_last_index"))
     .def_static("get_inputs_name", &ArgMax_Op::getInputsName)
     .def_static("get_outputs_name", &ArgMax_Op::getOutputsName)
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = ArgMax_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<ArgMaxAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     ;
   declare_registrable<ArgMax_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index f93df9e2c..6130fc271 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -62,6 +62,15 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
             py::arg("ceil_mode") = false)
     .def_static("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = AvgPooling_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<AvgPoolingAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 3339db0f2..199ef8134 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -42,6 +42,15 @@ void declare_BatchNormOp(py::module& m) {
             py::arg("training_mode"))
         .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = BatchNorm_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<BatchNormAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type);
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
index b4f6c90e5..f2f4b223d 100644
--- a/python_binding/operator/pybind_BitShift.cpp
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -35,7 +35,15 @@ void init_BitShift(py::module &m) {
         .def(py::init<BitShift_Op::BitShiftDirection>(), py::arg("direction"))
         .def("direction", &BitShift_Op::direction, "Get the direction of the bit shift (left or right).")
         .def_static("get_inputs_name", &BitShift_Op::getInputsName, "Get the names of the input tensors.")
-        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.");
+        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.")
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = BitShift_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<BitShiftAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		});
 
     // Enum binding under BitShiftOp class
     py::enum_<BitShift_Op::BitShiftDirection>(pyBitShiftOp, "BitShiftDirection")
diff --git a/python_binding/operator/pybind_Cast.cpp b/python_binding/operator/pybind_Cast.cpp
index 960a084ff..1e0ad7f9b 100644
--- a/python_binding/operator/pybind_Cast.cpp
+++ b/python_binding/operator/pybind_Cast.cpp
@@ -32,7 +32,15 @@ void init_Cast(py::module &m) {
         .def(py::init<DataType>(), py::arg("target_type"))
         .def("target_type", &Cast_Op::targetType, "Get the targeted type, output tensor data type")
         .def_static("get_inputs_name", &Cast_Op::getInputsName, "Get the names of the input tensors.")
-        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.");
+        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.")
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Cast_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<CastAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		});
 
     // Binding for the Cast function
     m.def("Cast", &Cast, py::arg("target_type"), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp
index 7c4563a98..a22a002d4 100644
--- a/python_binding/operator/pybind_Clip.cpp
+++ b/python_binding/operator/pybind_Clip.cpp
@@ -1,59 +1,68 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <pybind11/pybind11.h>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Clip.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/utils/Types.h"
-
-namespace py = pybind11;
-namespace Aidge {
-
-void init_Clip(py::module& m) {
-    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance(),
-        R"mydelimiter(
-        Initialize a Clip operator.
-
-        :param min : Minimum clipping value. Default is the lowest possible float value.
-        :type min : :py:class:`float`
-        :param max : Maximum clipping value. Default is the highest possible float value.
-        :type max : :py:class:`float`
-        )mydelimiter")
-    .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
-    .def_static("get_inputs_name", &Clip_Op::getInputsName)
-    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
-    .def("min", &Clip_Op::min, py::return_value_policy::reference_internal)
-    .def("max", &Clip_Op::max, py::return_value_policy::reference_internal);
-
-    declare_registrable<Clip_Op>(m, "ClipOp");
-
-    m.def("Clip", &Clip, py::arg("name") = "",
-        py::arg("min") = std::numeric_limits<float>::lowest(),
-        py::arg("max") = std::numeric_limits<float>::max(),
-        R"mydelimiter(
-        ClipOp is a tensor operator that performs a clipping operation on tensor elements.
-        This class allows limiting tensor values to a specified range, defined by the `min` 
-        and `max` parameters. Values outside this range are replaced by the corresponding 
-        limit values. When `min` is greater than `max`, the clip operator sets all the 'input' values to the value of `max`.
-
-        :param min: Minimum clipping value.
-        :type min: :py:class:`float`
-        :param max: Maximum clipping value.
-        :type max: :py:class:`float`
-        :param name: Name of the node.
-        :type name: :py:class:`str`
-        )mydelimiter");
-}
-
-}  // namespace Aidge
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Clip.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Clip(py::module& m) {
+    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Clip operator.
+
+        :param min : Minimum clipping value. Default is the lowest possible float value.
+        :type min : :py:class:`float`
+        :param max : Maximum clipping value. Default is the highest possible float value.
+        :type max : :py:class:`float`
+        )mydelimiter")
+    .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
+    .def_static("get_inputs_name", &Clip_Op::getInputsName)
+    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Clip_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ClipAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
+    .def("min", &Clip_Op::min, py::return_value_policy::reference_internal)
+    .def("max", &Clip_Op::max, py::return_value_policy::reference_internal);
+
+    declare_registrable<Clip_Op>(m, "ClipOp");
+
+    m.def("Clip", &Clip, py::arg("name") = "",
+        py::arg("min") = std::numeric_limits<float>::lowest(),
+        py::arg("max") = std::numeric_limits<float>::max(),
+        R"mydelimiter(
+        ClipOp is a tensor operator that performs a clipping operation on tensor elements.
+        This class allows limiting tensor values to a specified range, defined by the `min` 
+        and `max` parameters. Values outside this range are replaced by the corresponding 
+        limit values. When `min` is greater than `max`, the clip operator sets all the 'input' values to the value of `max`.
+
+        :param min: Minimum clipping value.
+        :type min: :py:class:`float`
+        :param max: Maximum clipping value.
+        :type max: :py:class:`float`
+        :param name: Name of the node.
+        :type name: :py:class:`str`
+        )mydelimiter");
+}
+
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index d2410b03a..236f16922 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -34,6 +34,15 @@ void init_Concat(py::module& m) {
              py::arg("axis") = 0)
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
         .def_static("get_outputs_name", &Concat_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Concat_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ConcatAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Concat_Op::Type);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index 5a0e858f1..b185f2f80 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -31,9 +31,17 @@ void init_ConstantOfShape(py::module &m) {
                      that will fill the output tensor.
       :type value : :py:class:`Tensor`
       )mydelimiter")
-      .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
+      .def_static("get_inputs_name", &ConstantOfShape_Op::getInputsName)
       .def_static("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
-      .def_static("value", &ConstantOfShape_Op::value);
+      .def_static("attributes_name", []() {
+        std::vector<std::string> result;
+        auto attributes = ConstantOfShape_Op::attributesName();
+        for (size_t i = 0; i < size(EnumStrings<ConstantOfShapeAttr>::data); ++i) {
+          result.emplace_back(attributes[i]);
+        }
+        return result;
+      })
+      .def("value", &ConstantOfShape_Op::value);
 
   m.def("ConstantOfShape", &ConstantOfShape, py::arg("value") = Tensor(0.f),
         py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 6ab073be6..e65a74c0c 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -43,6 +43,15 @@ void declare_ConvOp(py::module &m) {
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
         .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Conv_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ConvAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
         .def_readonly_static("Type", &Conv_Op<DIM>::Type)
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 5e24431d7..7ddbefd3d 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -56,6 +56,15 @@ void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = ConvDepthWise_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ConvDepthWiseAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+				return result;
+		})
   .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels)
   .def_readonly_static("Type", &ConvDepthWise_Op<DIM>::Type);
 
diff --git a/python_binding/operator/pybind_DepthToSpace.cpp b/python_binding/operator/pybind_DepthToSpace.cpp
index efb8a7406..d33386711 100644
--- a/python_binding/operator/pybind_DepthToSpace.cpp
+++ b/python_binding/operator/pybind_DepthToSpace.cpp
@@ -37,6 +37,15 @@ void declare_DepthToSpace(py::module &m) {
         }), py::arg("block_size"), py::arg("mode") = "CRD")
     .def_static("get_inputs_name", &DepthToSpace_Op::getInputsName)
     .def_static("get_outputs_name", &DepthToSpace_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = DepthToSpace_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<DepthToSpaceAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &DepthToSpace_Op::Type)
     .def("__repr__", [](DepthToSpace_Op& b) {
         return fmt::format("Operator(type='{}')", b.Type);
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index fed44a1e2..6afeb42a7 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -44,6 +44,15 @@ void init_Gather(py::module& m) {
                 py::arg("gathered_shape"))
         .def_static("get_inputs_name", &Gather_Op::getInputsName)
         .def_static("get_outputs_name", &Gather_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Gather_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<GatherAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Gather_Op::Type);
 
     declare_registrable<Gather_Op>(m, "GatherOp");
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 3464941dd..f4f0335fd 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -65,6 +65,15 @@ void declare_GridSampleOp(py::module &m) {
             py::arg("align_corners") = false)
         .def_static("get_inputs_name", &GridSample_Op::getInputsName)
         .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = GridSample_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<GridSampleAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &GridSample_Op::Type)
         ;
 
diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp
index cbc2502aa..b8d7f1d80 100644
--- a/python_binding/operator/pybind_Heaviside.cpp
+++ b/python_binding/operator/pybind_Heaviside.cpp
@@ -37,6 +37,15 @@ void init_Heaviside(py::module &m) {
         .def(py::init<float>(), py::arg("value"))
         .def_static("get_inputs_name", &Heaviside_Op::getInputsName)
         .def_static("get_outputs_name", &Heaviside_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Heaviside_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<HeavisideAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Heaviside_Op::Type);
 
     declare_registrable<Heaviside_Op>(m, "HeavisideOp");
diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp
index bb04ed1c5..f802152ba 100644
--- a/python_binding/operator/pybind_LRN.cpp
+++ b/python_binding/operator/pybind_LRN.cpp
@@ -30,6 +30,15 @@ void init_LRN(py::module& m) {
         .def(py::init<std::int32_t>(), py::arg("size"))
         .def_static("get_inputs_name", &LRN_Op::getInputsName)
         .def_static("get_outputs_name", &LRN_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = LRN_Op::attributesName();
+			for (size_t i = 0; attributes[i] != nullptr; ++i) {
+				result.emplace_back(attributes[i]);
+			}
+				return result;
+		})
         .def_readonly_static("Type", &LRN_Op::Type);
 
     m.def("LRN", &LRN, py::arg("size"), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 564fd90be..ab81052d2 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -30,6 +30,15 @@ void init_LeakyReLU(py::module& m) {
         .def(py::init<float>(), py::arg("negative_slope"))
         .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
         .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = LeakyReLU_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<LeakyReLUAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &LeakyReLU_Op::Type);
 
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index bdbc1edd3..953e56ebe 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -52,6 +52,15 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("ceil_mode"))
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+
+  .def_static("attributes_name", []() {
+    std::vector<std::string> result;
+    auto attributes = MaxPooling_Op<DIM>::attributesName();
+    for (size_t i = 0; i < size(EnumStrings<MaxPoolingAttr>::data); ++i) {
+      result.emplace_back(attributes[i]);
+    }
+    return result;
+  })
   .def_readonly_static("Type", &MaxPooling_Op<DIM>::Type);
   
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
index 3ac112211..f583602c9 100644
--- a/python_binding/operator/pybind_Memorize.cpp
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -23,7 +23,15 @@ void init_Memorize(py::module& m) {
     py::class_<Memorize_Op, std::shared_ptr<Memorize_Op>, OperatorTensor>(m, "MemorizeOp", py::multiple_inheritance())
         .def(py::init<const std::uint32_t>(), py::arg("end_step"))
         .def_static("get_inputs_name", &Memorize_Op::getInputsName)
-        .def_static("get_outputs_name", &Memorize_Op::getOutputsName);
+        .def_static("get_outputs_name", &Memorize_Op::getOutputsName)
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Memorize_Op::attributesName();
+			for (size_t i = 0;i < size(EnumStrings<MemorizeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		});
 
     declare_registrable<Memorize_Op>(m, "MemorizeOp");
 
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index fe899a75a..7b37bb206 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -50,6 +50,14 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Pad_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<PadAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Pad_Op<DIM>::Type);
 
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 2040f642b..20606d24d 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -23,6 +23,15 @@ void init_Pop(py::module& m) {
     .def(py::init<>())
     .def_static("get_inputs_name", &Pop_Op::getInputsName)
     .def_static("get_outputs_name", &Pop_Op::getOutputsName)
+
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = Pop_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<PopAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     .def_readonly_static("Type", &Pop_Op::Type);
 
     m.def("Pop", &Pop, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 028e45755..d29f6bfe7 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -43,6 +43,14 @@ void declare_ReduceMeanOp(py::module &m) {
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes") = std::vector<std::int32_t>(), py::arg("keep_dims") = true, py::arg("noop_with_empty_axes") = false)
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = ReduceMean_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<ReduceMeanAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     .def_readonly_static("Type", &ReduceMean_Op::Type)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
index eaa57ef1c..f139f2e7b 100644
--- a/python_binding/operator/pybind_ReduceSum.cpp
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -43,6 +43,15 @@ void init_ReduceSum(py::module &m) {
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
     .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
+
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = ReduceSum_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<ReduceSumAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     ;
   declare_registrable<ReduceSum_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index e3244f5dd..d263796ce 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -35,6 +35,15 @@ void init_Reshape(py::module& m) {
     .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
     .def_static("get_inputs_name", &Reshape_Op::getInputsName)
     .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Reshape_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ReshapeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Reshape_Op::Type);
 
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 2aa626098..10a60e1f9 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -25,10 +25,18 @@ namespace Aidge {
 void init_Resize(py::module &m) {
   py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
           m, "ResizeOp", py::multiple_inheritance())
-          .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
-          .def_static("get_inputs_name", &Resize_Op::getInputsName)
-          .def_static("get_outputs_name", &Resize_Op::getOutputsName)
-          .def_readonly_static("Type", &Resize_Op::Type);
+        .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
+        .def_static("get_inputs_name", &Resize_Op::getInputsName)
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Resize_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ResizeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+		    return result;
+		})
+        .def_readonly_static("Type", &Resize_Op::Type);
 
   declare_registrable<Resize_Op>(m, "ResizeOp");
 
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index c555bca89..ba975bb06 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -41,6 +41,15 @@ void init_Scaling(py::module& m) {
              py::arg("is_output_unsigned"))
         .def_static("get_inputs_name", &Scaling_Op::getInputsName)
         .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Scaling_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ScalingAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Scaling_Op::Type);
 
     declare_registrable<Scaling_Op>(m, "ScalingOp");
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index cc7669a24..3c8974bf0 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -34,6 +34,15 @@ void init_Shape(py::module& m) {
         .def(py::init<const std::int64_t, const std::int64_t>(), py::arg("start"), py::arg("end"))
         .def_static("get_inputs_name", &Shape_Op::getInputsName)
         .def_static("get_outputs_name", &Shape_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Shape_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ShapeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Shape_Op::Type);
 
     declare_registrable<Shape_Op>(m, "ShapeOp");
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index f01751b86..1cfd63f65 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -45,6 +45,15 @@ void init_Slice(py::module& m) {
                   py::arg("steps") = std::vector<std::int64_t>())
     .def_static("get_inputs_name", &Slice_Op::getInputsName)
     .def_static("get_outputs_name", &Slice_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Slice_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SliceAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Slice_Op::Type);
 
     declare_registrable<Slice_Op>(m, "SliceOp");
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 093f448e4..7a4a687fd 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -30,6 +30,15 @@ void init_Softmax(py::module& m) {
         .def(py::init<std::int32_t>(), py::arg("axis"))
         .def_static("get_inputs_name", &Softmax_Op::getInputsName)
         .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Softmax_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SoftmaxAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Softmax_Op::Type);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index f02a699e4..052fa277e 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -36,6 +36,15 @@ void init_Split(py::module& m) {
             py::arg("split"))
     .def_static("get_inputs_name", &Split_Op::getInputsName)
     .def_static("get_outputs_name", &Split_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Split_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SplitAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Split_Op::Type);
 
     declare_registrable<Split_Op>(m, "SplitOp");
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index f7ee4d722..7808c78da 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -34,6 +34,15 @@ void init_Squeeze(py::module &m) {
     )mydelimiter")
     .def_static("get_inputs_name", &Squeeze_Op::getInputsName)
     .def_static("get_outputs_name", &Squeeze_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Squeeze_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SqueezeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def("axes", &Squeeze_Op::axes);
 
     declare_registrable<Squeeze_Op>(m, "SqueezeOp");
diff --git a/python_binding/operator/pybind_Stack.cpp b/python_binding/operator/pybind_Stack.cpp
index c9bd969fa..026167446 100644
--- a/python_binding/operator/pybind_Stack.cpp
+++ b/python_binding/operator/pybind_Stack.cpp
@@ -26,6 +26,15 @@ void init_Stack(py::module &m) {
         .def(py::init<const std::uint32_t>(), py::arg("max_elements"))
         .def_static("get_inputs_name", &StackOp::getInputsName)
         .def_static("get_outputs_name", &StackOp::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = StackOp::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<StackAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &StackOp::s_type);
 
     m.def("Stack",
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 20794a155..1882aa4c4 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -38,6 +38,14 @@ void declare_Transpose(py::module &m) {
     .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order")=std::vector<std::size_t>())
     .def_static("get_inputs_name", &Transpose_Op::getInputsName)
     .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = Transpose_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<TransposeAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
   m.def("Transpose", &Transpose, py::arg("output_dims_order")=std::vector<std::size_t>(), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index c21a7bcfa..1ef94202c 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -30,6 +30,14 @@ void init_Unsqueeze(py::module &m) {
       // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def_static("get_inputs_name", &Unsqueeze_Op::getInputsName)
       .def_static("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+        .def_static("attributes_name", []() {
+            std::vector<std::string> result;
+            auto attributes = Unsqueeze_Op::attributesName();
+            for (size_t i = 0; i < size(EnumStrings<UnsqueezeAttr>::data); ++i) {
+                result.emplace_back(attributes[i]);
+            }
+            return result;
+        })
       .def_readonly_static("Type", &Unsqueeze_Op::Type)
       ;
 
-- 
GitLab


From 3b6a7bb2e5f1c964bfdd3daef7a4e0b0fdbb0444 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Tue, 18 Feb 2025 09:03:28 +0000
Subject: [PATCH 26/31] [Test] Add a test to ensure attributes follow snake
 case convention.

---
 aidge_core/unit_tests/test_naming.py | 40 ++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
 create mode 100644 aidge_core/unit_tests/test_naming.py

diff --git a/aidge_core/unit_tests/test_naming.py b/aidge_core/unit_tests/test_naming.py
new file mode 100644
index 000000000..af86dd050
--- /dev/null
+++ b/aidge_core/unit_tests/test_naming.py
@@ -0,0 +1,40 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import inspect
+import re
+
+def is_snake_case(s: str) -> bool:
+    return bool(re.fullmatch(r'^[a-z]+(_[a-z]+)*$', s))
+
+class test_naming(unittest.TestCase):
+    """Test that operator attribute names follow the snake_case convention
+    """
+    def setUp(self):
+        pass
+    def tearDown(self):
+        pass
+
+    def test_attributes_name(self):
+
+        for obj in inspect.getmembers(aidge_core):
+            if (inspect.isclass(obj[1]) and issubclass(obj[1], aidge_core.Operator) and obj[1] is not aidge_core.Operator) and hasattr(obj[1], "attributes_name"):
+                print(obj[0])
+                print(obj[1].attributes_name())
+                for attr_name in obj[1].attributes_name():
+                    self.assertTrue(is_snake_case(attr_name), f"Operator {obj[0]} has an attribute {attr_name} that is not in snake_case.")
+
+
+
+        pass
+if __name__ == '__main__':
+    unittest.main()
-- 
GitLab


From d544efba3c19da6bc54408dfa1b5ca1060b8c319 Mon Sep 17 00:00:00 2001
From: Cyril Moineau <cyril.moineau@cea.fr>
Date: Tue, 18 Feb 2025 09:11:37 +0000
Subject: [PATCH 27/31] Apply 1 suggestion(s) to 1 file(s)

---
 aidge_core/unit_tests/test_naming.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/aidge_core/unit_tests/test_naming.py b/aidge_core/unit_tests/test_naming.py
index af86dd050..eed7180ce 100644
--- a/aidge_core/unit_tests/test_naming.py
+++ b/aidge_core/unit_tests/test_naming.py
@@ -35,6 +35,5 @@ class test_naming(unittest.TestCase):
 
 
 
-        pass
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab


From 645ed07af7b906885e5c2ea9b836e0a5a72a6ae8 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Wed, 19 Feb 2025 09:57:29 +0000
Subject: [PATCH 28/31] Move declaration enumstring attr for clang
 compatibility.

---
 include/aidge/operator/ArgMax.hpp             | 26 +++---
 include/aidge/operator/AvgPooling.hpp         | 23 +++--
 include/aidge/operator/BatchNorm.hpp          | 14 +--
 include/aidge/operator/BitShift.hpp           | 18 ++--
 include/aidge/operator/Cast.hpp               | 14 +--
 include/aidge/operator/Clip.hpp               | 21 +++--
 include/aidge/operator/Concat.hpp             | 24 ++---
 include/aidge/operator/ConstantOfShape.hpp    | 11 ++-
 include/aidge/operator/Conv.hpp               | 31 ++++---
 include/aidge/operator/ConvDepthWise.hpp      | 30 +++---
 include/aidge/operator/DepthToSpace.hpp       | 13 +--
 include/aidge/operator/Flatten.hpp            | 13 +--
 include/aidge/operator/Fold.hpp               | 28 +++---
 include/aidge/operator/Gather.hpp             | 12 ++-
 include/aidge/operator/GridSample.hpp         | 21 +++--
 include/aidge/operator/Heaviside.hpp          | 18 ++--
 include/aidge/operator/LRN.hpp                | 30 +++---
 include/aidge/operator/LeakyReLU.hpp          | 18 ++--
 include/aidge/operator/MaxPooling.hpp         | 23 +++--
 include/aidge/operator/Memorize.hpp           | 29 +++---
 include/aidge/operator/Pad.hpp                | 66 ++++++-------
 include/aidge/operator/Pop.hpp                | 23 ++---
 include/aidge/operator/Producer.hpp           | 92 +++++++++----------
 include/aidge/operator/ReduceMean.hpp         | 21 +++--
 include/aidge/operator/ReduceSum.hpp          | 15 +--
 include/aidge/operator/Reshape.hpp            | 29 +++---
 include/aidge/operator/Resize.hpp             | 23 ++---
 include/aidge/operator/Scaling.hpp            | 25 ++---
 include/aidge/operator/Shape.hpp              | 20 ++--
 include/aidge/operator/Slice.hpp              | 14 +--
 include/aidge/operator/Softmax.hpp            | 20 ++--
 include/aidge/operator/Split.hpp              | 19 ++--
 include/aidge/operator/Squeeze.hpp            | 14 +--
 include/aidge/operator/Stack.hpp              | 19 ++--
 include/aidge/operator/Transpose.hpp          | 21 +++--
 include/aidge/operator/Unfold.hpp             | 31 ++++---
 include/aidge/operator/Unsqueeze.hpp          | 14 +--
 include/aidge/operator/WeightInterleaving.hpp | 10 +-
 38 files changed, 464 insertions(+), 429 deletions(-)

diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 6d24d87bd..bc97e1f5b 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -41,20 +41,28 @@ enum class ArgMaxAttr {
      */
     SelectLastIndex
 };
-
+} // namespace Aidge
+/**
+ * @brief Provides string representations for the ArgMaxAttr enumeration.
+ */
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
+}
+namespace Aidge {
 /**
  * @brief Description of the ArgMax operation on a Tensor.
  *
  * The ArgMax operation identifies the index of the maximum value along a specified axis of a Tensor.
  *
- * The output of the ArgMax operation can retain the dimensionality of the input Tensor or reduce 
- * it by removing the specified axis. Additionally, in cases where multiple maximum values exist, 
+ * The output of the ArgMax operation can retain the dimensionality of the input Tensor or reduce
+ * it by removing the specified axis. Additionally, in cases where multiple maximum values exist,
  * the user can specify whether to select the first or the last occurrence of the maximum value.
  *
  * Attributes:
  * - `Axis`: The axis along which the ArgMax operation is performed. For example, if the axis is `0`,
  *   the operation is applied along rows; if it is `1`, it is applied along columns.
- * - `KeepDims`: A boolean indicating whether to retain the reduced axis as a dimension of size `1` 
+ * - `KeepDims`: A boolean indicating whether to retain the reduced axis as a dimension of size `1`
  *   (`true`) or to completely remove it (`false`).
  * - `SelectLastIndex`: A boolean indicating how to handle ties (multiple maximum values along the axis):
  *   - If `true`, the last index of the maximum value is selected.
@@ -183,7 +191,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ArgMaxAttr>::data; 
+		return EnumStrings<Aidge::ArgMaxAttr>::data;
 	}
 };
 
@@ -206,12 +214,6 @@ std::shared_ptr<Node> ArgMax(std::int32_t axis = 0,
 
 }  // namespace Aidge
 
-/**
- * @brief Provides string representations for the ArgMaxAttr enumeration.
- */
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 7e02a94ab..e73387ce1 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -49,13 +49,23 @@ enum class AvgPoolingAttr {
      */
     CeilMode
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representation of the AvgPooling attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
+        "stride_dims", "kernel_dims", "dilations", "ceil_mode"
+    };
+}
+namespace Aidge {
 /**
  * @brief Class representing an Average Pooling operation.
  *
  * The AvgPooling operation computes the average value within sliding windows of specified size
  * (kernel dimensions) over the input tensor. The stride dimensions determine how the window
- * moves across the input. The dilation parameter allows spacing between kernel elements, and 
+ * moves across the input. The dilation parameter allows spacing between kernel elements, and
  * `ceil_mode` determines whether to use ceiling instead of floor when computing the output shape.
  * This operation is commonly used in neural networks to reduce spatial dimensions while preserving features.
  *
@@ -229,7 +239,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::AvgPoolingAttr>::data; 
+		return EnumStrings<Aidge::AvgPoolingAttr>::data;
 	}
 };
 
@@ -280,12 +290,5 @@ extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
 extern template class Aidge::AvgPooling_Op<4>;
 
-namespace {
-/**
- * @brief String representation of the AvgPooling attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = { "stride_dims", "kernel_dims", "dilations", "ceil_mode" };
-}
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 995179d7f..3521c9b16 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -50,7 +50,12 @@ enum class BatchNormAttr {
    */
   TrainingMode
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
+}
+namespace Aidge {
 /**
  * @class BatchNorm_Op
  * @brief Implements the Batch Normalization (BN) operation, a technique used to normalize the inputs of a layer.
@@ -158,7 +163,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::BatchNormAttr>::data; 
+		return EnumStrings<Aidge::BatchNormAttr>::data;
 	}
 };
 
@@ -178,9 +183,4 @@ extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const bool, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const bool, const std::string&);
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_BATCHNORM_H_ */
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index d066507dd..3e9f8c3f2 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -32,7 +32,15 @@ enum class BitShiftAttr {
      */
     BitShiftdirection
 };
-
+}
+namespace {
+    /**
+     * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {"bit_shift_direction"};
+}
+namespace Aidge {
 /**
  * @class BitShift_Op
  * @brief A tensor operator to perform element-wise bitwise shift operations on tensors.
@@ -169,12 +177,6 @@ inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direc
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
- */
-template <>
-const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {"bit_shift_direction"};
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 12c3a280a..b2ffbb553 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -40,7 +40,12 @@ enum class CastAttr {
      */
     TargetType
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
+}
+namespace Aidge {
 /**
  * @brief Description of the Cast operation to convert a tensor's data type.
  *
@@ -143,7 +148,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::CastAttr>::data; 
+		return EnumStrings<Aidge::CastAttr>::data;
 	}
 };
 
@@ -157,9 +162,4 @@ std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name =
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 93c042d86..51ecb6eb3 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -33,14 +33,23 @@ enum class ClipAttr {
     Min,  /**< Minimum value for clipping. */
     Max   /**< Maximum value for clipping. */
 };
+}
+namespace {
+    /**
+     * @brief Specialization of EnumStrings for ClipAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
+}
 
+namespace Aidge {
 /**
  * @brief Description of the Clip operation to limit tensor values within a specified range.
  *
  * The Clip operator ensures tensor elements are within the range `[min, max]`.
  * - Values less than `min` are set to `min`.
  * - Values greater than `max` are set to `max`.
- * 
+ *
  * The input and output Tensors have the same dimensions.
  *
  * ### Attributes:
@@ -154,7 +163,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ClipAttr>::data; 
+		return EnumStrings<Aidge::ClipAttr>::data;
 	}
 };
 
@@ -173,12 +182,4 @@ std::shared_ptr<Aidge::Node> Clip(
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Specialization of EnumStrings for ClipAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CLIP_H_ */
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 7a4ea74a4..1f8a357a8 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -58,7 +58,17 @@ enum class ConcatAttr {
      */
     Axis
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief Specialization of EnumStrings for ConcatAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
+        "axis"
+    };
+}
+namespace Aidge {
 /**
  * @class Concat_Op
  * @brief Implements the Concat operation to concatenate multiple tensors along a specified axis.
@@ -175,7 +185,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ConcatAttr>::data; 
+		return EnumStrings<Aidge::ConcatAttr>::data;
 	}
 };
 
@@ -190,14 +200,4 @@ std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0,
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Specialization of EnumStrings for ConcatAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
-    "axis"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CONCAT_H_ */
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index d837d108a..6176f69dd 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -40,6 +40,12 @@ enum class ConstantOfShapeAttr {
   Value,
 };
 
+namespace {
+  template <>
+  const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
+  }
+  
+
 /**
  * @brief This operator's purpose is to generate a tensor of shape given via
  * input and filled with a given value set via attribute.
@@ -135,10 +141,5 @@ inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
-}
-
 #endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 7beea057e..135ff8860 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -40,15 +40,24 @@ enum class ConvAttr {
     DilationDims,   // The dilation dimensions
     KernelDims      // The kernel dimensions
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @class Conv_Op
  * @brief Convolution operator for performing a multi-dimensional convolution.
- * 
- * The Conv_Op class implements a convolution operator for tensors with customizable 
- * kernel dimensions, stride, and dilation values. The operator performs a convolution 
+ *
+ * The Conv_Op class implements a convolution operator for tensors with customizable
+ * kernel dimensions, stride, and dilation values. The operator performs a convolution
  * operation on the input tensor and produces an output tensor.
- * 
+ *
  * ### Attributes:
  * - `strideDims`: Stride for each dimension of the input.
  * - `dilationDims`: Dilation for each dimension of the input.
@@ -63,7 +72,7 @@ enum class ConvAttr {
  *      - Stride dimensions: {1, 1} (stride of 1 in both height and width)
  *      - Dilation dimensions: {1, 1} (no dilation)
  *      - Padding: None
- *      - Output shape: 
+ *      - Output shape:
  *         (1, 64, (32−3+2×0)/1+1, (32−3+2×0)/1+1) = (1, 64, 30, 30)
  *
  * @see OperatorTensor
@@ -215,7 +224,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ConvAttr>::data; 
+		return EnumStrings<Aidge::ConvAttr>::data;
 	}
 };
 
@@ -268,13 +277,5 @@ inline std::shared_ptr<Node> Conv(
 extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 3090b9feb..b307d67a6 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -34,15 +34,24 @@ enum class ConvDepthWiseAttr {
     DilationDims, // The dilation dimensions for the convolution.
     KernelDims    // The kernel dimensions for the convolution.
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @class ConvDepthWise_Op
  * @brief Depthwise Convolution operator for performing a multi-dimensional depthwise convolution.
- * 
- * The ConvDepthWise_Op class implements a depthwise convolution operator for tensors with customizable 
- * kernel dimensions, stride, and dilation values. It performs a depthwise convolution operation on the 
+ *
+ * The ConvDepthWise_Op class implements a depthwise convolution operator for tensors with customizable
+ * kernel dimensions, stride, and dilation values. It performs a depthwise convolution operation on the
  * input tensor and produces an output tensor.
- * 
+ *
  * ### Attributes:
  * - strideDims: Stride for each dimension of the input.
  * - dilationDims: Dilation for each dimension of the input.
@@ -195,7 +204,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ConvDepthWiseAttr>::data; 
+		return EnumStrings<Aidge::ConvDepthWiseAttr>::data;
 	}
 };
 
@@ -245,13 +254,4 @@ inline std::shared_ptr<Node> ConvDepthWise(
 extern template class Aidge::ConvDepthWise_Op<1>;
 extern template class Aidge::ConvDepthWise_Op<2>;
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index cc51ea180..c99f7bbb7 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -51,7 +51,12 @@ enum class DepthToSpaceAttr {
     BlockSize, /**< The block size for rearranging depth to spatial dimensions. */
     Mode       /**< The mode for depth-to-space transformation. */
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
+}
+namespace Aidge{
 /**
  * @class DepthToSpace_Op
  * @brief Represents the DepthToSpace operation to rearrange data from depth to spatial dimensions.
@@ -170,7 +175,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::DepthToSpaceAttr>::data; 
+		return EnumStrings<Aidge::DepthToSpaceAttr>::data;
 	}
 };
 
@@ -187,9 +192,5 @@ std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
-}
 
 #endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index 10ce58ad0..b61fc6912 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -54,7 +54,12 @@ enum class FlattenAttr {
      */
     Axis
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
+}
+namespace Aidge {
 /**
  * @brief Description the Flatten operation to reshape a tensor into a 2D matrix.
  *
@@ -161,7 +166,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::FlattenAttr>::data; 
+		return EnumStrings<Aidge::FlattenAttr>::data;
 	}
 };
 
@@ -179,9 +184,5 @@ std::shared_ptr<Node> Flatten(std::int64_t axis = 1,
                             const std::string &name = "");
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
-}
 
 #endif /* AIDGE_CORE_OPERATOR_FLATTEN_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 9d2d4e0df..2f9974e8e 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -64,7 +64,17 @@ enum class FoldAttr {
      */
     KernelDims
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
+        "output_dims",
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @class Fold_Op
  * @brief Implements the Fold operation to combine or transform tensor dimensions.
@@ -82,7 +92,7 @@ enum class FoldAttr {
  *       output height (out_h) = floor((input height - kernel height) / stride height) + 1
  *       output width (out_w) = floor((input width - kernel width) / stride width) + 1
  *      - The exact output shape will depend on these calculations for each spatial dimension (height, width) and the number of output channels.
- *         
+ *
  * @example:
  *  - Input shape: (1, 16, 32, 32)  // Batch size: 1, Channels: 16, Height: 32, Width: 32
  *  - Kernel dimensions: (3, 3)  // 3x3 kernel
@@ -216,13 +226,13 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::FoldAttr>::data; 
+		return EnumStrings<Aidge::FoldAttr>::data;
 	}
 };
 
 /**
  * @brief Create a Fold operation node.
- * 
+ *
  * This function creates a Fold operation node that applies a fold transformation
  * to a tensor based on the specified attributes.
  *
@@ -255,14 +265,4 @@ extern template class Aidge::Fold_Op<2>;
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
-    "output_dims",
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_FOLD_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 3842a041e..86fc7bc78 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -61,6 +61,12 @@ enum class GatherAttr {
     GatheredShape
 };
 
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
+}
+namespace Aidge {
 /**
  * @brief Description for the Gather operation on an input tensor.
  *
@@ -190,7 +196,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::GatherAttr>::data; 
+		return EnumStrings<Aidge::GatherAttr>::data;
 	}
 };
 
@@ -213,9 +219,5 @@ std::shared_ptr<Node> Gather(std::int8_t axis = 0,
 
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 28c5fb5e5..066422311 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -29,6 +29,16 @@ enum class GridSampleAttr {
 	PaddingMode,	// Specifies how to handle out-of-boundary grid values.
 	AlignCorners	// Determines whether grid values are normalized to align with the image corners.
 };
+} // namespace Aidge
+namespace {
+	template <>
+	const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
+		"mode",
+		"padding_mode",
+		"align_corners"
+	};
+}
+namespace Aidge {
 
 /**
  * @class GridSample_Op
@@ -176,7 +186,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::GridSampleAttr>::data; 
+		return EnumStrings<Aidge::GridSampleAttr>::data;
 	}
 };
 
@@ -197,13 +207,4 @@ std::shared_ptr<Node> GridSample(
 
 } // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
-    "mode",
-    "padding_mode",
-    "align_corners"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_ */
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index 874853c4e..806ed47f3 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -31,6 +31,15 @@ enum class HeavisideAttr {
      */
     Value
 };
+} // namespace Aidge
+namespace {
+    /**
+     * @brief Define string representations for Heaviside attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
+}
+namespace Aidge {
 
 /**
  * @class Heaviside_Op
@@ -115,7 +124,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::HeavisideAttr>::data; 
+		return EnumStrings<Aidge::HeavisideAttr>::data;
 	}
 
     /**
@@ -149,12 +158,5 @@ std::shared_ptr<Node> Heaviside(float value, const std::string &name = "");
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Define string representations for Heaviside attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index 9019c089b..6c82b6b46 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -30,20 +30,28 @@ enum class LRNAttr {
     Bias,   ///< Constant bias added to the normalization term.
     Size    ///< Number of channels to normalize over.
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for LRNAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size", nullptr};
+}
+namespace Aidge {
 /**
  * @brief Description of a Local Response Normalization (LRN) operation on an input Tensor.
  *
- * LRN is a normalization technique that applies across channels in a local region 
- * to enhance generalization and promote competition between neurons. It is commonly 
+ * LRN is a normalization technique that applies across channels in a local region
+ * to enhance generalization and promote competition between neurons. It is commonly
  * used in Convolutional Neural Networks (CNNs).
  *
  * For each element x in the input Tensor, the function is defined as:
  * `f(x) = x / (bias + alpha * sum(x_i^2))^beta`, where:
  * - `x` is the current element being normalized.
- * - The summation `sum(x_i^2)` is taken over a local region of `size` channels 
+ * - The summation `sum(x_i^2)` is taken over a local region of `size` channels
  *   surrounding `x` (both before and after the current channel, if available).
- * - `bias`, `alpha`, and `beta` are scalar hyperparameters controlling the 
+ * - `bias`, `alpha`, and `beta` are scalar hyperparameters controlling the
  *   normalization behavior.
  *
  * Parameters:
@@ -52,7 +60,7 @@ enum class LRNAttr {
  * - `alpha`: A scaling factor for the squared sum of elements in the local region.
  * - `beta`: The exponent applied to the normalization term.
  *
- * The input and output Tensors have the same shape. If the input Tensor has shape `(N, C, H, W)`, 
+ * The input and output Tensors have the same shape. If the input Tensor has shape `(N, C, H, W)`,
  * the output Tensor will also have shape `(N, C, H, W)`.
  *
  * @see OperatorTensor
@@ -164,7 +172,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::LRNAttr>::data; 
+		return EnumStrings<Aidge::LRNAttr>::data;
 	}
 };
 
@@ -179,12 +187,4 @@ std::shared_ptr<Node> LRN(std::int32_t size, const std::string& name = "");
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for LRNAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size", nullptr};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 5381b3cb1..acf9bae7f 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -30,7 +30,13 @@ enum class LeakyReLUAttr {
      */
     NegativeSlope
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
+        = {"negative_slope"};
+    }
+namespace Aidge{
 /**
  * @class LeakyReLU_Op
  * @brief Implements the LeakyReLU activation function.
@@ -77,7 +83,7 @@ public:
     /**
      * @brief Copy-constructor.
      * @param[in] op LeakyReLU_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
      * The new operator has no associated input.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op);
@@ -121,7 +127,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::LeakyReLUAttr>::data; 
+		return EnumStrings<Aidge::LeakyReLUAttr>::data;
 	}
 };
 
@@ -135,10 +141,4 @@ public:
 std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
 }
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
-    = {"negative_slope"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index f4f38de4a..d90aab4a0 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -59,6 +59,16 @@ enum class MaxPoolingAttr {
    */
   CeilMode,
 };
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of MaxPooling attributes for debugging and logging.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "dilations", "ceil_mode"};
+    }
+
+namespace Aidge{
 
 /**
  * @class MaxPooling_Op
@@ -66,8 +76,8 @@ enum class MaxPoolingAttr {
  * @brief Implements the MaxPooling operation over a specified input tensor.
  *
  * MaxPooling reduces spatial dimensions by applying a max filter over a sliding window.
- * The stride dimensions determine how the window moves across the input. The dilation 
- * parameter allows spacing between kernel elements, and `ceil_mode` determines whether 
+ * The stride dimensions determine how the window moves across the input. The dilation
+ * parameter allows spacing between kernel elements, and `ceil_mode` determines whether
  * to use ceiling instead of floor when computing the output shape.
  *
  * ### Output Shape Calculation
@@ -204,7 +214,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::MaxPoolingAttr>::data; 
+		return EnumStrings<Aidge::MaxPoolingAttr>::data;
 	}
 };
 
@@ -255,12 +265,5 @@ inline std::shared_ptr<Node> MaxPooling(
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of MaxPooling attributes for debugging and logging.
- */
-template <>
-const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "dilations", "ceil_mode"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 10bbfce85..59df17ec1 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -120,10 +120,22 @@ enum class MemorizeAttr {
     ForwardStep,    // Tracks the current step in the forward pass.
     EndStep         // The final step for which memory updates will occur.
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of the Memorize operator's attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
+        "schedule_step",
+        "forward_step",
+        "end_step"
+    };
+}
+namespace Aidge {
 /**
  * @class Memorize_Op
- * @brief The Memorize Operator is responsible for storing a tensor's state over a defined 
+ * @brief The Memorize Operator is responsible for storing a tensor's state over a defined
  * number of iterations and providing the stored value as output at each iteration.
  *
  *  Memorize operators are used in models with recurrent structures or feedback loops, such as LSTMs.
@@ -246,7 +258,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::MemorizeAttr>::data; 
+		return EnumStrings<Aidge::MemorizeAttr>::data;
 	}
 };
 
@@ -259,16 +271,5 @@ public:
 std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of the Memorize operator's attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
-    "schedule_step",
-    "forward_step",
-    "end_step"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 417e9664c..de7c3d2b2 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -36,6 +36,18 @@ enum class PadAttr {
     BorderValue      ///< Value to be used for constant padding.
 };
 
+namespace {
+    /**
+     * @brief EnumStrings specialization for PadAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::PadAttr>::data[] = {
+        "begin_end_borders",
+        "border_type",
+        "border_value"
+    };
+}  // namespace
+
 /**
  * @enum PadBorderType
  * @brief Types of border handling available for padding.
@@ -47,7 +59,19 @@ enum class PadBorderType {
     Wrap,     ///< Values wrap around the tensor dimensions.
     Zero      ///< All out-of-bound values are set to 0.
 };
-
+} // namespace Aidge
+/**
+ * @brief EnumStrings specialization for PadBorderType.
+ */
+template <>
+const char* const EnumStrings<Aidge::PadBorderType>::data[] = {
+    "Constant",
+    "Edge",
+    "Reflect",
+    "Wrap",
+    "Zero"
+};
+namespace Aidge {
 /**
  * @class Pad_Op
  * @brief Implementation of the Pad operator.
@@ -64,14 +88,14 @@ enum class PadBorderType {
  * The operator supports various border handling techniques (e.g., constant padding, reflection, wrapping).
  *
  * ### Output Tensor Shape:
- * If the input tensor has a shape `[B, C, d1, d2, ..., dN]`, where `B` is the batch size, 
- * `C` is the number of channels, and `[d1, d2, ..., dN]` are the spatial dimensions, 
- * and the padding is defined by `beginEndTuples = {b1, e1, b2, e2, ..., bN, eN}`, 
+ * If the input tensor has a shape `[B, C, d1, d2, ..., dN]`, where `B` is the batch size,
+ * `C` is the number of channels, and `[d1, d2, ..., dN]` are the spatial dimensions,
+ * and the padding is defined by `beginEndTuples = {b1, e1, b2, e2, ..., bN, eN}`,
  * the output tensor shape will be:
- * 
+ *
  * `[B, C, d1 + b1 + e1, d2 + b2 + e2, ..., dN + bN + eN]`.
- * 
- * The padding values `b_i` and `e_i` specify the number of elements to add before and after 
+ *
+ * The padding values `b_i` and `e_i` specify the number of elements to add before and after
  * the corresponding spatial dimension `d_i`. Batch size and channel count remain unchanged.
  *
  * @example Constant Padding:
@@ -92,7 +116,7 @@ enum class PadBorderType {
  *    - Output tensor shape: `[B, C, 4 + 1 + 1, 5 + 2 + 2, 6 + 0 + 0] = [B, C, 6, 9, 6]`
  *    - Padding values mirror the existing tensor values.
  *
- * This operator is commonly used for image processing, extending spatial dimensions while maintaining 
+ * This operator is commonly used for image processing, extending spatial dimensions while maintaining
  * batch and channel consistency, or aligning tensor dimensions in machine learning workflows.
  */
 template <DimIdx_t DIM>
@@ -222,7 +246,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::PadAttr>::data; 
+		return EnumStrings<Aidge::PadAttr>::data;
 	}
 };
 
@@ -258,30 +282,6 @@ inline std::shared_ptr<Node> Pad(
 extern template class Aidge::Pad_Op<1>;
 extern template class Aidge::Pad_Op<2>;
 
-namespace {
-
-/**
- * @brief EnumStrings specialization for PadAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::PadAttr>::data[] = {
-    "begin_end_borders",
-    "border_type",
-    "border_value"
-};
 
-/**
- * @brief EnumStrings specialization for PadBorderType.
- */
-template <>
-const char* const EnumStrings<Aidge::PadBorderType>::data[] = {
-    "Constant",
-    "Edge",
-    "Reflect",
-    "Wrap",
-    "Zero"
-};
-
-}  // namespace
 
 #endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 630c58c0d..d9d52f9bc 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -101,7 +101,17 @@ enum class PopAttr {
     ForwardStep,    // Tracks the current step in the forward pass
     BackwardStep    // Tracks the current step in the backward pass
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of the `Pop` operator's attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::PopAttr>::data[] = {
+        "forward_step", "backward_step"
+    };
+}
+namespace Aidge {
 /**
  * @class Pop_Op
  * @brief The `Pop` operator is responsible for removing and outputting elements from a data structure.
@@ -217,7 +227,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::PopAttr>::data; 
+		return EnumStrings<Aidge::PopAttr>::data;
 	}
 };
 
@@ -229,14 +239,5 @@ public:
 std::shared_ptr<Node> Pop(const std::string& name = "");
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of the `Pop` operator's attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::PopAttr>::data[] = {
-    "forward_step", "backward_step"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_POP_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1d6b96582..3690579d3 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -35,25 +35,33 @@ namespace Aidge {
  * @brief Attributes specific to the `Producer_Op` class.
  */
 enum class ProdAttr { Constant };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief Enum string representation for `ProdAttr`.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::ProdAttr>::data[] = {"constant"};
+}
+namespace Aidge {
 /**
  * @class Producer_Op
  * @brief Represents an operator that stores a tensor in memory and provides it as an output.
- * 
- * The `Producer_Op` class is a specialized operator designed to store a tensor in memory 
- * and return it as an output tensor. It is typically used to store parameters or input 
- * values for a computational graph. A `Producer_Op` does not have any input data, parameters, 
- * or attributes, making it a fundamental building block for constant or initialized values 
+ *
+ * The `Producer_Op` class is a specialized operator designed to store a tensor in memory
+ * and return it as an output tensor. It is typically used to store parameters or input
+ * values for a computational graph. A `Producer_Op` does not have any input data, parameters,
+ * or attributes, making it a fundamental building block for constant or initialized values
  * within the graph.
- * 
+ *
  * Key characteristics of a `Producer_Op`:
  * - No inputs: The operator does not accept any input tensors.
  * - No parameters or attributes: It is solely responsible for producing an output tensor.
  * - Stores and returns a tensor: The stored tensor is accessible as the operator's output.
- * 
- * This operator is useful for scenarios where fixed or pre-initialized tensors need to 
+ *
+ * This operator is useful for scenarios where fixed or pre-initialized tensors need to
  * be introduced into a graph, such as weights, biases, or constant values.
- * 
+ *
  * @see OperatorTensor
  * @see Registrable
  */
@@ -77,7 +85,7 @@ public:
 
     /**
      * @brief Constructs a `Producer_Op` object with specific dimensions.
-     * 
+     *
      * @tparam DIM The number of dimensions for the tensor.
      * @param[in] dims Array defining the dimensions of the tensor.
      * @param[in] constant Indicates whether the tensor is constant.
@@ -87,7 +95,7 @@ public:
 
     /**
      * @brief Constructs a `Producer_Op` object from an existing tensor.
-     * 
+     *
      * @param[in] tensor A shared pointer to the tensor to be produced.
      * @param[in] constant Indicates whether the tensor should be constant.
      */
@@ -95,10 +103,10 @@ public:
 
     /**
      * @brief Copy constructor.
-     * 
-     * Copies the attributes and output tensors of the operator. 
+     *
+     * Copies the attributes and output tensors of the operator.
      * Input tensors are not copied, and the new operator will have no associated inputs.
-     * 
+     *
      * @param[in] op The `Producer_Op` object to copy.
      */
     Producer_Op(const Producer_Op& op);
@@ -106,28 +114,28 @@ public:
 public:
     /**
      * @brief Conversion operator to retrieve the output tensor.
-     * 
+     *
      * @return A shared pointer to the output tensor.
      */
     operator std::shared_ptr<Tensor>() const { return mOutputs[0]; }
 
     /**
      * @brief Clones the operator using the copy constructor.
-     * 
+     *
      * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
     /**
      * @brief Retrieves the dimensions of the output tensor.
-     * 
+     *
      * @return A vector containing the dimensions of the output tensor.
      */
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     /**
      * @brief Sets the backend for the operator's execution.
-     * 
+     *
      * @param[in] name The name of the backend.
      * @param[in] device The device index (default is 0).
      */
@@ -135,35 +143,35 @@ public:
 
     /**
      * @brief Retrieves the list of available backends for this operator.
-     * 
+     *
      * @return A set containing the names of available backends.
      */
     std::set<std::string> getAvailableBackends() const override;
 
     /**
      * @brief Retrieves the operator's attributes.
-     * 
+     *
      * @return A shared pointer to the operator's attributes.
      */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     /**
      * @brief Retrieves the constant attribute.
-     * 
+     *
      * @return A reference to the constant attribute.
      */
     inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
 
     /**
      * @brief Performs the forward operation for the operator.
-     * 
+     *
      * Generates the output tensor based on the defined attributes and configuration.
      */
     void forward() override final;
 
     /**
      * @brief Placeholder for the backward operation.
-     * 
+     *
      * This function logs a debug message, as `Producer_Op` typically does not support backpropagation.
      */
     void backward() override final {
@@ -172,12 +180,12 @@ public:
 
     /**
      * @brief Associates an input tensor with the operator.
-     * 
+     *
      * This operation is not supported by `Producer_Op` as it does not take inputs.
-     * 
+     *
      * @param[in] inputIdx The index of the input.
      * @param[in] data A shared pointer to the data to associate.
-     * 
+     *
      * @throws std::runtime_error Always throws, as inputs are not supported.
      */
     void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
@@ -186,35 +194,35 @@ public:
 
     /**
      * @brief Checks whether dimensions are forwarded.
-     * 
+     *
      * @return Always true for `Producer_Op`.
      */
     inline bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
     /**
      * @brief Confirms that dimensions have been forwarded.
-     * 
+     *
      * @return Always true for `Producer_Op`.
      */
     inline bool dimsForwarded() const noexcept override final { return true; }
 
     /**
      * @brief Retrieves the names of the inputs for the operator.
-     * 
+     *
      * @return An empty vector, as `Producer_Op` takes no inputs.
      */
     static const std::vector<std::string> getInputsName() { return {}; }
 
     /**
      * @brief Retrieves the names of the outputs for the operator.
-     * 
+     *
      * @return A vector containing the output name "data_output".
      */
     static const std::vector<std::string> getOutputsName() { return {"data_output"}; }
 
     /**
      * @brief Sets the output tensor for the operator.
-     * 
+     *
      * @param[in] outputIdx Index of the output to set.
      * @param[in] data A shared pointer to the data.
      */
@@ -223,12 +231,12 @@ public:
 
 /**
  * @brief Helper function to create a producer node with specified dimensions.
- * 
+ *
  * @tparam DIM The number of dimensions.
  * @param[in] dims Array defining the dimensions of the tensor.
  * @param[in] name Optional name for the node.
  * @param[in] constant Indicates whether the tensor should be constant.
- * 
+ *
  * @return A shared pointer to the created node.
  */
 template <std::size_t DIM>
@@ -236,11 +244,11 @@ std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM>& dims, const std
 
 /**
  * @brief Helper function with a C-style array for dimension deduction.
- * 
+ *
  * @param[in] dims C-style array defining the tensor dimensions.
  * @param[in] name Optional name for the node.
  * @param[in] constant Indicates whether the tensor should be constant.
- * 
+ *
  * @return A shared pointer to the created node.
  */
 template <std::size_t DIM>
@@ -257,12 +265,12 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode,
 
 /**
  * @brief Adds a producer node to another node with a C-style array.
- * 
+ *
  * @param[in] otherNode The node to associate with the producer.
  * @param[in] inputIdx The input index.
  * @param[in] dims C-style array defining the tensor dimensions.
  * @param[in] extension An extension string for the producer.
- * 
+ *
  * @return A shared pointer to the updated node.
  */
 template <std::size_t DIM>
@@ -272,12 +280,4 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOInde
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Enum string representation for `ProdAttr`.
- */
-template <>
-const char* const EnumStrings<Aidge::ProdAttr>::data[] = {"constant"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index c6d875719..3ee4a1bec 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -51,7 +51,16 @@ enum class ReduceMeanAttr {
    */
   NoopWithEmptyAxes
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {
+        "axes",
+        "keep_dims",
+        "noop_with_empty_axes"
+    };
+}
+namespace Aidge {
 /**
  * @class ReduceMean_Op
  * @brief Implements the ReduceMean operation to compute the mean of a tensor along specified axes.
@@ -170,7 +179,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ReduceMeanAttr>::data; 
+		return EnumStrings<Aidge::ReduceMeanAttr>::data;
 	}
 
     virtual ~ReduceMean_Op() noexcept;
@@ -194,13 +203,5 @@ std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {
-    "axes",
-    "keep_dims",
-    "noop_with_empty_axes"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 72f6bf9b2..adb58f895 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -52,6 +52,12 @@ enum class ReduceSumAttr {
   NoopWithEmptyAxes
 };
 
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+}
+namespace Aidge {
 /**
  * @class ReduceSum_Op
  * @brief Implements the ReduceSum operation to compute the sum of a tensor along specified axes.
@@ -100,7 +106,7 @@ public:
     /**
      * @brief constructor for ReduceSum op
      * @param[in] axes around which perform the operation
-     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and 
+     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and
      * if false we remove the dimension completely
      * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing
      * and if false, we reduce on all axes
@@ -176,7 +182,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ReduceSumAttr>::data; 
+		return EnumStrings<Aidge::ReduceSumAttr>::data;
 	}
 };
 
@@ -202,9 +208,4 @@ inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
 }
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 51623737e..e69c42d4d 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -53,21 +53,29 @@ enum class ReshapeAttr {
      * @brief The target shape for the output tensor.
      */
     Shape,
-    
+
     /**
      * @brief Whether zeros in the shape attribute are allowed.
-     * 
+     *
      * When true, zeros in the target shape retain the corresponding dimension size from the input tensor.
      */
     AllowZero
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for ReshapeAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = {"shape", "allow_zero"};
+}
+namespace Aidge {
 /**
  * @brief Description of Reshape operator that adjusts the shape of the input tensor.
  *
- * This operator reshapes the input tensor according to the specified target shape. 
- * If the target shape is not compatible with the input tensor's total number of elements, 
- * the operation will fail. If the `AllowZero` attribute is true, zeros in the target shape 
+ * This operator reshapes the input tensor according to the specified target shape.
+ * If the target shape is not compatible with the input tensor's total number of elements,
+ * the operation will fail. If the `AllowZero` attribute is true, zeros in the target shape
  * retain the corresponding dimensions from the input tensor.
  *
  * @example Input: Tensor of dimensions `[2, 3]` with `Shape = {3, 2}` results in a tensor with dimensions `[3, 2]`.
@@ -182,7 +190,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ReshapeAttr>::data; 
+		return EnumStrings<Aidge::ReshapeAttr>::data;
 	}
 };
 
@@ -200,12 +208,5 @@ std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for ReshapeAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = {"shape", "allow_zero"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 3a4ef3771..37d42fcc8 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -39,7 +39,17 @@ enum class ResizeAttr {
     InterpolationMode,
     PaddingMode
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
+        "coordinate_transformation_mode",
+        "cubic_coeff_a",
+        "interpolation_mode",
+        "padding_mode"
+    };
+}
+namespace Aidge {
 /**
  * @brief Resize operator, will up/downscale a given tensor given the input.
  * @verbatim
@@ -197,7 +207,7 @@ class Resize_Op
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ResizeAttr>::data; 
+		return EnumStrings<Aidge::ResizeAttr>::data;
 	}
 };
 
@@ -230,13 +240,4 @@ Resize(std::vector<float> scale = std::vector<float>(),
 
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
-    "coordinate_transformation_mode",
-    "cubic_coeff_a",
-    "interpolation_mode",
-    "padding_mode"
-};
-}
 #endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index c1f4514c9..fb342d345 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -23,7 +23,7 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-// Caution: This operator is now deprecated and should no longer be used. 
+// Caution: This operator is now deprecated and should no longer be used.
 // It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
 
 namespace Aidge {
@@ -38,7 +38,7 @@ enum class ScalingAttr {
     /**
      * @brief Number of quantization bits.
      *
-     * Specifies the bit-width used for quantization. 
+     * Specifies the bit-width used for quantization.
      * For example, a value of `8` represents 8-bit quantization.
      */
     QuantizedNbBits,
@@ -51,12 +51,18 @@ enum class ScalingAttr {
      */
     IsOutputUnsigned
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::ScalingAttr>::data[]
+        = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
+}
+namespace Aidge {
 /**
  * @brief Description of a scaling operation to scale and quantize input tensors.
  *
- * The `Scaling_Op` class applies a scaling factor to the input tensor, quantizes 
- * the scaled values to a specified bit-width, and outputs either signed or unsigned integers 
+ * The `Scaling_Op` class applies a scaling factor to the input tensor, quantizes
+ * the scaled values to a specified bit-width, and outputs either signed or unsigned integers
  * based on the configuration.
  *
  * The input and output Tensors have the same dimensions.
@@ -94,7 +100,7 @@ public:
     /**
      * @brief Copy-constructor.
      * @param[in] op Scaling_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
      * The new operator has no associated input.
      */
     Scaling_Op(const Scaling_Op& op);
@@ -140,7 +146,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ScalingAttr>::data; 
+		return EnumStrings<Aidge::ScalingAttr>::data;
 	}
 };
 
@@ -159,10 +165,5 @@ std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
                                      const std::string& name = "");
 } // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-    = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 84d497abf..2a553fb82 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -62,7 +62,15 @@ enum class ShapeAttr {
      */
     End
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for ShapeAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
+}
+namespace Aidge {
 /**
  * @brief Description of the operation of extracting the shape of a tensor.
  *
@@ -169,7 +177,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ShapeAttr>::data; 
+		return EnumStrings<Aidge::ShapeAttr>::data;
 	}
 };
 
@@ -185,12 +193,6 @@ std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for ShapeAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index ea4d21e9a..fa21b3d19 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -84,7 +84,12 @@ enum class SliceAttr {
      */
     Steps
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
+}
+namespace Aidge{
 /**
  * @class Slice_Op
  * @brief Implements the Slice operation for extracting sub-tensors.
@@ -209,7 +214,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::SliceAttr>::data; 
+		return EnumStrings<Aidge::SliceAttr>::data;
 	}
 };
 
@@ -231,9 +236,4 @@ std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index a7d8283a0..86e1a57e7 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -33,7 +33,15 @@ enum class SoftmaxAttr {
      */
     Axis
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for SoftmaxAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
+}
+namespace Aidge {
 /**
  * @brief Description of a Softmax operation on input Tensor along a specified axis.
  *
@@ -136,7 +144,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::SoftmaxAttr>::data; 
+		return EnumStrings<Aidge::SoftmaxAttr>::data;
 	}
 };
 
@@ -151,12 +159,4 @@ std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for SoftmaxAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 9f2beb3aa..8b6acb060 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -65,7 +65,17 @@ enum class SplitAttr {
      */
     Split
 };
+} // namespace Aidge
 
+namespace {
+    /**
+     * @brief EnumStrings specialization for SplitAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::SplitAttr>::data[] = {"axis", "split"};
+    }
+
+namespace Aidge {
 /**
  * @class Split_Op
  * @brief Implements the Split operation to divide a tensor into multiple sub-tensors along a specified axis.
@@ -179,7 +189,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::SplitAttr>::data; 
+		return EnumStrings<Aidge::SplitAttr>::data;
 	}
 };
 
@@ -199,12 +209,5 @@ std::shared_ptr<Node> Split(DimSize_t nbOutput,
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for SplitAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::SplitAttr>::data[] = {"axis", "split"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 9a2cc8f54..69fa9d493 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -48,7 +48,12 @@ enum class SqueezeAttr {
    */
   Axes
 };
-
+} // namespace Aidge
+namespace {
+  template <>
+  const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"axes"};
+}
+namespace Aidge {
 /**
  * @brief This operator has as purpose to remove dummy dimensions around given
  * axes.
@@ -148,7 +153,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::SqueezeAttr>::data; 
+		return EnumStrings<Aidge::SqueezeAttr>::data;
 	}
 };
 
@@ -160,9 +165,4 @@ inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"axes"};
-}
-
 #endif // AIDGE_CORE_OPERATOR_SQUEEZE_H_
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 0e420789d..214428447 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -95,7 +95,15 @@ enum class StackAttr {
     ForwardStep,   // Tracks the current step in the forward pass.
     MaxElements    // Maximum number of elements that can be stacked.
 };
-
+}  // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of the Stack operator's attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
+}
+namespace Aidge {
 /**
  * @class StackOp
  * @brief The `Stack` operator performs a stacking operation over a sequence of input tensors.
@@ -218,7 +226,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::StackAttr>::data; 
+		return EnumStrings<Aidge::StackAttr>::data;
 	}
 };
 
@@ -231,12 +239,5 @@ public:
 std::shared_ptr<Node> Stack(std::uint32_t maxElements = 0, const std::string& name = "");
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of the Stack operator's attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_STACK_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index d760ccd0d..2619c5ea5 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -54,13 +54,21 @@ public:
 enum class TransposeAttr {
     /**
      * @brief Order of the output dimensions relative to the input dimensions.
-     * 
+     *
      * If this attribute is empty, the dimensions of the input tensor will
      * be reversed.
      */
     OutputDimsOrder
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for TransposeAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
+    }
+namespace Aidge {
 /**
  * @brief Describes the operation of transposing the axes of a given tensor.
  *
@@ -172,7 +180,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::TransposeAttr>::data; 
+		return EnumStrings<Aidge::TransposeAttr>::data;
 	}
 };
 
@@ -188,12 +196,5 @@ std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder =
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for TransposeAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index bea32c6cc..d220807d6 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -71,13 +71,25 @@ enum class UnfoldAttr {
      */
     KernelDims
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for UnfoldAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::UnfoldAttr>::data[] = {
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @brief Describes the operation of unfolding a tensor into sliding blocks.
- * 
+ *
  * The Unfold operator extracts sliding blocks from the input tensor along
  * specified dimensions, controlled by stride, dilation, and kernel size.
- * 
+ *
  * @tparam DIM Number of dimensions involved in the operation.
  *
  * @example Input: Tensor of dimensions `[1, 3, 32, 32]`, with `KernelDims = {3, 3}`,
@@ -205,7 +217,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::UnfoldAttr>::data; 
+		return EnumStrings<Aidge::UnfoldAttr>::data;
 	}
 };
 
@@ -237,16 +249,5 @@ inline std::shared_ptr<Node> Unfold( DimSize_t const (&kernelDims)[DIM],
 
 extern template class Aidge::Unfold_Op<2>;
 
-namespace {
-/**
- * @brief EnumStrings specialization for UnfoldAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::UnfoldAttr>::data[] = {
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_UNFOLD_H_ */
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 8c5909182..a78a98672 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -47,7 +47,12 @@ enum class UnsqueezeAttr {
    */
   Axes
 };
-
+} // namespace Aidge
+namespace {
+  template <>
+  const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"axes"};
+}
+namespace Aidge {
 /**
  * @brief This operator has as purpose to add a dummy dimension around given
  * axis. Unsqueezing the 2nd dim of a tensor of dim (1,2,3,4) will result in a
@@ -146,7 +151,7 @@ public:
 	 * @return A vector containing the attributes name.
 	 */
 	static const char* const* attributesName(){
-		return EnumStrings<Aidge::UnsqueezeAttr>::data; 
+		return EnumStrings<Aidge::UnsqueezeAttr>::data;
 	}
 };
 
@@ -158,9 +163,4 @@ inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"axes"};
-}
-
 #endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
diff --git a/include/aidge/operator/WeightInterleaving.hpp b/include/aidge/operator/WeightInterleaving.hpp
index 315bb3e2d..a8f8c3d74 100644
--- a/include/aidge/operator/WeightInterleaving.hpp
+++ b/include/aidge/operator/WeightInterleaving.hpp
@@ -30,10 +30,10 @@ namespace Aidge {
  * @brief WeightInterleaving operator Compresses the last dimension of a tensor by packing low-bitwidth values
  * (e.g., 2, 3, or 4 bits) into fewer bytes.
  *
- * The operator reduces the size of the last dimension based on the bitwidth (`nb_bits`), 
- * packing multiple values into each byte. For example, 4-bit values result in a halved last dimension, 
+ * The operator reduces the size of the last dimension based on the bitwidth (`nb_bits`),
+ * packing multiple values into each byte. For example, 4-bit values result in a halved last dimension,
  * while 2-bit values reduce it by a factor of 4.
- * 
+ *
  * The output tensor has the same shape as the input, except for the compressed last dimension.
  *
  * @see OperatorTensor
@@ -78,10 +78,10 @@ public:
 
     /**
      * @brief Calculates the required size for the 8-bits`compactData` vector.
-     * 
+     *
      * This function determines the minimum number of bytes needed in `compactData`
      * to store `dataSize` elements compacted to `nb_bits` bits each.
-     * 
+     *
      * @param dataSize The total number of elements in the input data array.
      * @param nb_bits The number of bits to use for each compacted element (from 1 to 7).
      * @return std::size_t The required size in bytes for `compactData`.
-- 
GitLab


From b27558e81b76ab3f03f801721251174cee068bbe Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 20 Feb 2025 08:15:27 +0000
Subject: [PATCH 29/31] Remove merge conflict artifact.

---
 include/aidge/operator/AvgPooling.hpp | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 367435fe7..6022d6a2a 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -290,9 +290,4 @@ extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
 extern template class Aidge::AvgPooling_Op<4>;
 
-<<<<<<< HEAD
-=======
-
->>>>>>> 9b3579590d612d89cd36f42d47bb396670ef14af
-
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
-- 
GitLab


From 91e3542537a715937fdc80ebe3247deeefdf2167 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 20 Feb 2025 08:24:12 +0000
Subject: [PATCH 30/31] Fix Pad compilation for clang.

---
 include/aidge/operator/Pad.hpp | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index de7c3d2b2..0880b2c97 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -35,19 +35,6 @@ enum class PadAttr {
     BorderType,      ///< Type of border handling during padding.
     BorderValue      ///< Value to be used for constant padding.
 };
-
-namespace {
-    /**
-     * @brief EnumStrings specialization for PadAttr.
-     */
-    template <>
-    const char* const EnumStrings<Aidge::PadAttr>::data[] = {
-        "begin_end_borders",
-        "border_type",
-        "border_value"
-    };
-}  // namespace
-
 /**
  * @enum PadBorderType
  * @brief Types of border handling available for padding.
@@ -59,7 +46,20 @@ enum class PadBorderType {
     Wrap,     ///< Values wrap around the tensor dimensions.
     Zero      ///< All out-of-bound values are set to 0.
 };
+
 } // namespace Aidge
+
+namespace {
+    /**
+     * @brief EnumStrings specialization for PadAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::PadAttr>::data[] = {
+        "begin_end_borders",
+        "border_type",
+        "border_value"
+    };
+
 /**
  * @brief EnumStrings specialization for PadBorderType.
  */
@@ -71,6 +71,8 @@ const char* const EnumStrings<Aidge::PadBorderType>::data[] = {
     "Wrap",
     "Zero"
 };
+} // namespace
+
 namespace Aidge {
 /**
  * @class Pad_Op
-- 
GitLab


From c4831e4a3908f7087a57831176575944d225909e Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 20 Feb 2025 08:31:35 +0000
Subject: [PATCH 31/31] Fix ConstantOfShape compilation for clang.

---
 include/aidge/operator/ConstantOfShape.hpp | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index 6176f69dd..e78fba12e 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -39,12 +39,13 @@ enum class ConstantOfShapeAttr {
    */
   Value,
 };
-
+} // namespace Aidge
 namespace {
   template <>
   const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
-  }
-  
+ } //namespace
+
+  namespace Aidge {
 
 /**
  * @brief This operator's purpose is to generate a tensor of shape given via
-- 
GitLab