diff --git a/aidge_core/unit_tests/test_naming.py b/aidge_core/unit_tests/test_naming.py
new file mode 100644
index 0000000000000000000000000000000000000000..eed7180ce77b6255dc27cfb83113cde6cbfda285
--- /dev/null
+++ b/aidge_core/unit_tests/test_naming.py
@@ -0,0 +1,39 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import inspect
+import re
+
+def is_snake_case(s: str) -> bool:
+    return bool(re.fullmatch(r'[a-z]+(_[a-z]+)*', s))
+
+class TestNaming(unittest.TestCase):
+    """Verify that operator attribute names exposed to Python follow snake_case.
+    """
+    def setUp(self):
+        pass
+    def tearDown(self):
+        pass
+
+    def test_attributes_name(self):
+
+        for obj in inspect.getmembers(aidge_core):
+            if (inspect.isclass(obj[1]) and issubclass(obj[1], aidge_core.Operator) and obj[1] is not aidge_core.Operator) and hasattr(obj[1], "attributes_name"):
+                # every advertised attribute name must be snake_case so the
+                # Python API naming stays consistent across operators
+                for attr_name in obj[1].attributes_name():
+                    self.assertTrue(is_snake_case(attr_name), f"Operator {obj[0]} has an attribute {attr_name} that is not in snake_case.")
+
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 7358899a9e6dfac85299a2cf498b7e6e1ba9e7c2..bc97e1f5bdd4dcc80857db55b66e9b6bedb1fa62 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -41,20 +41,28 @@ enum class ArgMaxAttr {
      */
     SelectLastIndex
 };
-
+} // namespace Aidge
+/**
+ * @brief Provides string representations for the ArgMaxAttr enumeration.
+ */
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
+}
+namespace Aidge {
 /**
  * @brief Description of the ArgMax operation on a Tensor.
  *
  * The ArgMax operation identifies the index of the maximum value along a specified axis of a Tensor.
  *
- * The output of the ArgMax operation can retain the dimensionality of the input Tensor or reduce 
- * it by removing the specified axis. Additionally, in cases where multiple maximum values exist, 
+ * The output of the ArgMax operation can retain the dimensionality of the input Tensor or reduce
+ * it by removing the specified axis. Additionally, in cases where multiple maximum values exist,
  * the user can specify whether to select the first or the last occurrence of the maximum value.
  *
  * Attributes:
  * - `Axis`: The axis along which the ArgMax operation is performed. For example, if the axis is `0`,
  *   the operation is applied along rows; if it is `1`, it is applied along columns.
- * - `KeepDims`: A boolean indicating whether to retain the reduced axis as a dimension of size `1` 
+ * - `KeepDims`: A boolean indicating whether to retain the reduced axis as a dimension of size `1`
  *   (`true`) or to completely remove it (`false`).
  * - `SelectLastIndex`: A boolean indicating how to handle ties (multiple maximum values along the axis):
  *   - If `true`, the last index of the maximum value is selected.
@@ -177,6 +185,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ArgMaxAttr>::data;
+	}
 };
 
 /**
@@ -198,12 +214,6 @@ std::shared_ptr<Node> ArgMax(std::int32_t axis = 0,
 
 }  // namespace Aidge
 
-/**
- * @brief Provides string representations for the ArgMaxAttr enumeration.
- */
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index ab9e111f2adcb1d09635924184c89800900d0635..6022d6a2a1459bbfa1844f6c6d300ed8232abed4 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -49,13 +49,23 @@ enum class AvgPoolingAttr {
      */
     CeilMode
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representation of the AvgPooling attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
+        "stride_dims", "kernel_dims", "dilations", "ceil_mode"
+    };
+}
+namespace Aidge {
 /**
  * @brief Class representing an Average Pooling operation.
  *
  * The AvgPooling operation computes the average value within sliding windows of specified size
  * (kernel dimensions) over the input tensor. The stride dimensions determine how the window
- * moves across the input. The dilation parameter allows spacing between kernel elements, and 
+ * moves across the input. The dilation parameter allows spacing between kernel elements, and
  * `ceil_mode` determines whether to use ceiling instead of floor when computing the output shape.
  * This operation is commonly used in neural networks to reduce spatial dimensions while preserving features.
  *
@@ -223,6 +233,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::AvgPoolingAttr>::data;
+	}
 };
 
 /**
@@ -272,12 +290,4 @@ extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
 extern template class Aidge::AvgPooling_Op<4>;
 
-namespace {
-/**
- * @brief String representation of the AvgPooling attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = { "stride_dims", "kernel_dims", "dilations", "ceil_mode" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index ddffaeb027ee6c581ae8e8c9abb06bfaa0d2a4d6..3521c9b16dcbbf73b0c3c4aea9d93047dc0a2f61 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -50,7 +50,12 @@ enum class BatchNormAttr {
    */
   TrainingMode
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
+}
+namespace Aidge {
 /**
  * @class BatchNorm_Op
  * @brief Implements the Batch Normalization (BN) operation, a technique used to normalize the inputs of a layer.
@@ -152,6 +157,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::BatchNormAttr>::data;
+	}
 };
 
 extern template class Aidge::BatchNorm_Op<2>;
@@ -170,9 +183,4 @@ extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const bool, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const bool, const std::string&);
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_BATCHNORM_H_ */
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index 711cf858520c5f85ea0099f5a8ad9ab03940ee9f..3e9f8c3f22728afc4fae7abf5f60adc13c89ac76 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -28,11 +28,19 @@ namespace Aidge {
 
 enum class BitShiftAttr {
     /**
-     * 
+     *
      */
     BitShiftdirection
 };
-
+}
+namespace {
+    /**
+     * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {"bit_shift_direction"};
+}
+namespace Aidge {
 /**
  * @class BitShift_Op
  * @brief A tensor operator to perform element-wise bitwise shift operations on tensors.
@@ -41,7 +49,7 @@ enum class BitShiftAttr {
  * - **InputTensor**: The tensor whose elements will be shifted.
  * - **ShiftAmount**: The tensor specifying the shift amount for each element.
  *
- * The shift is applied in the direction specified by the attribute `BitShiftdirection`, 
+ * The shift is applied in the direction specified by the attribute `BitShiftdirection`,
  * which can either be `left` or `right`.
  *
  * @see OperatorTensor
@@ -147,6 +155,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "OutputTensor" };
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::BitShiftAttr>::data;
+	}
 };
 
 /**
@@ -161,12 +177,6 @@ inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direc
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
- */
-template <>
-const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = { "BitShiftdirection" };
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 1f934fbc7d12f79db7185a5ff12136513b2fb7df..b2ffbb553ce44f66f371a65f35340193bf04dab4 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -40,7 +40,12 @@ enum class CastAttr {
      */
     TargetType
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
+}
+namespace Aidge {
 /**
  * @brief Description of the Cast operation to convert a tensor's data type.
  *
@@ -137,6 +142,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::CastAttr>::data;
+	}
 };
 
 /**
@@ -149,9 +162,4 @@ std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name =
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 0825b85bbdcbcd9adf90d0dca6cbf0a292f4f94f..51ecb6eb36591c2e22ea47ba529b87d125c92a65 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -33,14 +33,23 @@ enum class ClipAttr {
     Min,  /**< Minimum value for clipping. */
     Max   /**< Maximum value for clipping. */
 };
+}
+namespace {
+    /**
+     * @brief Specialization of EnumStrings for ClipAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
+}
 
+namespace Aidge {
 /**
  * @brief Description of the Clip operation to limit tensor values within a specified range.
  *
  * The Clip operator ensures tensor elements are within the range `[min, max]`.
  * - Values less than `min` are set to `min`.
  * - Values greater than `max` are set to `max`.
- * 
+ *
  * The input and output Tensors have the same dimensions.
  *
  * ### Attributes:
@@ -148,6 +157,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "data_output" };
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ClipAttr>::data;
+	}
 };
 
 /**
@@ -165,12 +182,4 @@ std::shared_ptr<Aidge::Node> Clip(
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Specialization of EnumStrings for ClipAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CLIP_H_ */
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 83914b6730bde238d5e2e7b4391bd034c8f4d146..1f8a357a830ef3bf3d945ea488425128ea99d3ed 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -56,9 +56,19 @@ enum class ConcatAttr {
      *
      * The specified axis determines the direction of concatenating.
      */
-    Axis 
+    Axis
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief Specialization of EnumStrings for ConcatAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
+        "axis"
+    };
+}
+namespace Aidge {
 /**
  * @class Concat_Op
  * @brief Implements the Concat operation to concatenate multiple tensors along a specified axis.
@@ -107,7 +117,7 @@ public:
      * @param[in] nbIn Number of input tensors.
      * @param[in] axis Axis along which concatenation is performed.
      */
-    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis);
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis = 0);
 
     /**
      * @brief Copy-constructor. Copies the operator attributes and its output tensors,
@@ -169,6 +179,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return { "data_output" };
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConcatAttr>::data;
+	}
 };
 
 /**
@@ -182,14 +200,4 @@ std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0,
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Specialization of EnumStrings for ConcatAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
-    "axis"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CONCAT_H_ */
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index 18e626544606fd150b2843d2367aa8858669c2ba..e78fba12ec89be456da0aca25c9bb15e170bdede 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -39,6 +39,13 @@ enum class ConstantOfShapeAttr {
    */
   Value,
 };
+} // namespace Aidge
+namespace {
+  template <>
+  const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
+} // namespace
+
+namespace Aidge {
 
 /**
  * @brief This operator's purpose is to generate a tensor of shape given via
@@ -63,7 +70,7 @@ private:
 public:
   /**
    * @brief constructor for ConstantOfShape_op
-   * @param[in] value : a scalar tensor which holds the value that will 
+   * @param[in] value : a scalar tensor which holds the value that will
    * fill the output tensor
    */
   ConstantOfShape_Op(const Tensor &value = Tensor(0.f))
@@ -116,6 +123,14 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"constant_of_shape"};
   }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConstantOfShapeAttr>::data;
+	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
@@ -127,10 +142,5 @@ inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"Value"};
-}
-
 #endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 8984ebd08cb0b1ddfffc885f7d8c2e3df9b23da2..135ff8860706d245ae6095322d6cf017456cc2e1 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -40,15 +40,24 @@ enum class ConvAttr {
     DilationDims,   // The dilation dimensions
     KernelDims      // The kernel dimensions
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @class Conv_Op
  * @brief Convolution operator for performing a multi-dimensional convolution.
- * 
- * The Conv_Op class implements a convolution operator for tensors with customizable 
- * kernel dimensions, stride, and dilation values. The operator performs a convolution 
+ *
+ * The Conv_Op class implements a convolution operator for tensors with customizable
+ * kernel dimensions, stride, and dilation values. The operator performs a convolution
  * operation on the input tensor and produces an output tensor.
- * 
+ *
  * ### Attributes:
  * - `strideDims`: Stride for each dimension of the input.
  * - `dilationDims`: Dilation for each dimension of the input.
@@ -63,7 +72,7 @@ enum class ConvAttr {
  *      - Stride dimensions: {1, 1} (stride of 1 in both height and width)
  *      - Dilation dimensions: {1, 1} (no dilation)
  *      - Padding: None
- *      - Output shape: 
+ *      - Output shape:
  *         (1, 64, (32−3+2×0)/1+1, (32−3+2×0)/1+1) = (1, 64, 30, 30)
  *
  * @see OperatorTensor
@@ -209,6 +218,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConvAttr>::data;
+	}
 };
 
 /**
@@ -260,13 +277,5 @@ inline std::shared_ptr<Node> Conv(
 extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 03e821041981b5d5bc6ca972c5923751588a75eb..b307d67a61cabd416bb96db8558fb6960cd65cc4 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -34,15 +34,24 @@ enum class ConvDepthWiseAttr {
     DilationDims, // The dilation dimensions for the convolution.
     KernelDims    // The kernel dimensions for the convolution.
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @class ConvDepthWise_Op
  * @brief Depthwise Convolution operator for performing a multi-dimensional depthwise convolution.
- * 
- * The ConvDepthWise_Op class implements a depthwise convolution operator for tensors with customizable 
- * kernel dimensions, stride, and dilation values. It performs a depthwise convolution operation on the 
+ *
+ * The ConvDepthWise_Op class implements a depthwise convolution operator for tensors with customizable
+ * kernel dimensions, stride, and dilation values. It performs a depthwise convolution operation on the
  * input tensor and produces an output tensor.
- * 
+ *
  * ### Attributes:
  * - strideDims: Stride for each dimension of the input.
  * - dilationDims: Dilation for each dimension of the input.
@@ -189,6 +198,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ConvDepthWiseAttr>::data;
+	}
 };
 
 /**
@@ -237,13 +254,4 @@ inline std::shared_ptr<Node> ConvDepthWise(
 extern template class Aidge::ConvDepthWise_Op<1>;
 extern template class Aidge::ConvDepthWise_Op<2>;
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 769dad767e90dfe23f67867d72f08f0787d1cdf8..c99f7bbb7d882300b7f2f4278dda832189064ad5 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -51,7 +51,12 @@ enum class DepthToSpaceAttr {
     BlockSize, /**< The block size for rearranging depth to spatial dimensions. */
     Mode       /**< The mode for depth-to-space transformation. */
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
+}
+namespace Aidge{
 /**
  * @class DepthToSpace_Op
  * @brief Represents the DepthToSpace operation to rearrange data from depth to spatial dimensions.
@@ -164,6 +169,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::DepthToSpaceAttr>::data;
+	}
 };
 
 /**
@@ -179,9 +192,5 @@ std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
-}
 
 #endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index a7f5c6435f17eefceae6c82655599096229c3b3c..b61fc6912dd0e9f61dd2506370c591aae8c3a107 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -54,7 +54,12 @@ enum class FlattenAttr {
      */
     Axis
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
+}
+namespace Aidge {
 /**
  * @brief Description the Flatten operation to reshape a tensor into a 2D matrix.
  *
@@ -155,6 +160,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::FlattenAttr>::data;
+	}
 };
 
 /**
@@ -171,9 +184,5 @@ std::shared_ptr<Node> Flatten(std::int64_t axis = 1,
                             const std::string &name = "");
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
-}
 
 #endif /* AIDGE_CORE_OPERATOR_FLATTEN_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 3b5b9449d82c1dac315573e2820b1dda7c6fb7bf..2f9974e8ed3b1723734a2483616feceace5bec33 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -64,7 +64,17 @@ enum class FoldAttr {
      */
     KernelDims
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
+        "output_dims",
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @class Fold_Op
  * @brief Implements the Fold operation to combine or transform tensor dimensions.
@@ -82,7 +92,7 @@ enum class FoldAttr {
  *       output height (out_h) = floor((input height - kernel height) / stride height) + 1
  *       output width (out_w) = floor((input width - kernel width) / stride width) + 1
  *      - The exact output shape will depend on these calculations for each spatial dimension (height, width) and the number of output channels.
- *         
+ *
  * @example:
  *  - Input shape: (1, 16, 32, 32)  // Batch size: 1, Channels: 16, Height: 32, Width: 32
  *  - Kernel dimensions: (3, 3)  // 3x3 kernel
@@ -210,11 +220,19 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::FoldAttr>::data;
+	}
 };
 
 /**
  * @brief Create a Fold operation node.
- * 
+ *
  * This function creates a Fold operation node that applies a fold transformation
  * to a tensor based on the specified attributes.
  *
@@ -247,14 +265,4 @@ extern template class Aidge::Fold_Op<2>;
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
-    "output_dims",
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_FOLD_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index dc3e1a814248a2b742d813f679eea22c1954e1a9..86fc7bc7855473c6f73e3bcc36d46ef9b4956446 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -61,6 +61,12 @@ enum class GatherAttr {
     GatheredShape
 };
 
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
+}
+namespace Aidge {
 /**
  * @brief Description for the Gather operation on an input tensor.
  *
@@ -184,6 +190,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::GatherAttr>::data;
+	}
 };
 
 /**
@@ -205,9 +219,5 @@ std::shared_ptr<Node> Gather(std::int8_t axis = 0,
 
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 999f7bba1c7399ee6e813e7fa4297e27b7b7ae58..06642231152cefe1023688811da0dcdc0bbde859 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -29,6 +29,16 @@ enum class GridSampleAttr {
 	PaddingMode,	// Specifies how to handle out-of-boundary grid values.
 	AlignCorners	// Determines whether grid values are normalized to align with the image corners.
 };
+} // namespace Aidge
+namespace {
+	template <>
+	const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
+		"mode",
+		"padding_mode",
+		"align_corners"
+	};
+}
+namespace Aidge {
 
 /**
  * @class GridSample_Op
@@ -170,6 +180,14 @@ public:
 	static const std::vector<std::string> getOutputsName() {
 		return {"data_output"};
 	}
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::GridSampleAttr>::data;
+	}
 };
 
 /**
@@ -189,13 +207,4 @@ std::shared_ptr<Node> GridSample(
 
 } // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
-    "mode",
-    "padding_mode",
-    "align_corners"
-};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_ */
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index 94eaa400a52c8e3e9ffcf40c4896aee423fb6ed9..806ed47f3db5f78b5636f7f14876f852ea22b341 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -31,6 +31,15 @@ enum class HeavisideAttr {
      */
     Value
 };
+} // namespace Aidge
+namespace {
+    /**
+     * @brief Define string representations for Heaviside attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
+}
+namespace Aidge {
 
 /**
  * @class Heaviside_Op
@@ -110,6 +119,14 @@ public:
         return {"output"};
     }
 
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::HeavisideAttr>::data;
+	}
+
     /**
      * @brief Get the attributes of the operator.
      */
@@ -141,12 +158,5 @@ std::shared_ptr<Node> Heaviside(float value, const std::string &name = "");
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Define string representations for Heaviside attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index 369da5f97edf1927ec3f255b7ace35520212e031..6c82b6b4670cff44e9d21aeabe8f64aa2b2e2397 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -30,20 +30,28 @@ enum class LRNAttr {
     Bias,   ///< Constant bias added to the normalization term.
     Size    ///< Number of channels to normalize over.
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for LRNAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size"};
+}
+namespace Aidge {
 /**
  * @brief Description of a Local Response Normalization (LRN) operation on an input Tensor.
  *
- * LRN is a normalization technique that applies across channels in a local region 
- * to enhance generalization and promote competition between neurons. It is commonly 
+ * LRN is a normalization technique that applies across channels in a local region
+ * to enhance generalization and promote competition between neurons. It is commonly
  * used in Convolutional Neural Networks (CNNs).
  *
  * For each element x in the input Tensor, the function is defined as:
  * `f(x) = x / (bias + alpha * sum(x_i^2))^beta`, where:
  * - `x` is the current element being normalized.
- * - The summation `sum(x_i^2)` is taken over a local region of `size` channels 
+ * - The summation `sum(x_i^2)` is taken over a local region of `size` channels
  *   surrounding `x` (both before and after the current channel, if available).
- * - `bias`, `alpha`, and `beta` are scalar hyperparameters controlling the 
+ * - `bias`, `alpha`, and `beta` are scalar hyperparameters controlling the
  *   normalization behavior.
  *
  * Parameters:
@@ -52,7 +60,7 @@ enum class LRNAttr {
  * - `alpha`: A scaling factor for the squared sum of elements in the local region.
  * - `beta`: The exponent applied to the normalization term.
  *
- * The input and output Tensors have the same shape. If the input Tensor has shape `(N, C, H, W)`, 
+ * The input and output Tensors have the same shape. If the input Tensor has shape `(N, C, H, W)`,
  * the output Tensor will also have shape `(N, C, H, W)`.
  *
  * @see OperatorTensor
@@ -158,6 +166,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::LRNAttr>::data;
+	}
 };
 
 /**
@@ -171,12 +187,4 @@ std::shared_ptr<Node> LRN(std::int32_t size, const std::string& name = "");
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for LRNAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 46730d0269dfeff1dd7cc47fe5ae01597b28dcdf..acf9bae7f4955fee09699f27b7a23c06ce3d670e 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -30,7 +30,13 @@ enum class LeakyReLUAttr {
      */
     NegativeSlope
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
+        = {"negative_slope"};
+    }
+namespace Aidge{
 /**
  * @class LeakyReLU_Op
  * @brief Implements the LeakyReLU activation function.
@@ -77,7 +83,7 @@ public:
     /**
      * @brief Copy-constructor.
      * @param[in] op LeakyReLU_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
      * The new operator has no associated input.
      */
     LeakyReLU_Op(const LeakyReLU_Op& op);
@@ -115,6 +121,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::LeakyReLUAttr>::data;
+	}
 };
 
 /**
@@ -127,10 +141,4 @@ public:
 std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
 }
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
-    = {"negative_slope"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 9063fb88b4e018826bff82e0e09e6dbfdbd48421..d90aab4a0b7581a5d1e2c7eaf6fb295e51953af4 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -59,6 +59,16 @@ enum class MaxPoolingAttr {
    */
   CeilMode,
 };
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of MaxPooling attributes for debugging and logging.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "dilations", "ceil_mode"};
+    }
+
+namespace Aidge{
 
 /**
  * @class MaxPooling_Op
@@ -66,8 +76,8 @@ enum class MaxPoolingAttr {
  * @brief Implements the MaxPooling operation over a specified input tensor.
  *
  * MaxPooling reduces spatial dimensions by applying a max filter over a sliding window.
- * The stride dimensions determine how the window moves across the input. The dilation 
- * parameter allows spacing between kernel elements, and `ceil_mode` determines whether 
+ * The stride dimensions determine how the window moves across the input. The dilation
+ * parameter allows spacing between kernel elements, and `ceil_mode` determines whether
  * to use ceiling instead of floor when computing the output shape.
  *
  * ### Output Shape Calculation
@@ -198,6 +208,14 @@ public:
      * @return A vector of output tensors names.
      */
     static const std::vector<std::string> getOutputsName(){ return {"data_output"}; }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::MaxPoolingAttr>::data;
+	}
 };
 
 /**
@@ -247,12 +265,5 @@ inline std::shared_ptr<Node> MaxPooling(
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of MaxPooling attributes for debugging and logging.
- */
-template <>
-const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "dilations", "ceil_mode"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index deefc007772ccaa8744c42aa47dbb5f2d5a2c7f5..59df17ec146bb33dc1e6e8c007eb275054fd727b 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -120,10 +120,22 @@ enum class MemorizeAttr {
     ForwardStep,    // Tracks the current step in the forward pass.
     EndStep         // The final step for which memory updates will occur.
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of the Memorize operator's attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
+        "schedule_step",
+        "forward_step",
+        "end_step"
+    };
+}
+namespace Aidge {
 /**
  * @class Memorize_Op
- * @brief The Memorize Operator is responsible for storing a tensor's state over a defined 
+ * @brief The Memorize Operator is responsible for storing a tensor's state over a defined
  * number of iterations and providing the stored value as output at each iteration.
  *
  *  Memorize operators are used in models with recurrent structures or feedback loops, such as LSTMs.
@@ -240,6 +252,14 @@ public:
     static const std::vector<std::string> getOutputsName(){
         return {"data_output", "data_output_rec"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::MemorizeAttr>::data;
+	}
 };
 
 /**
@@ -251,16 +271,5 @@ public:
 std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of the Memorize operator's attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
-    "schedule_step",
-    "forward_step",
-    "end_step"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index c1ed3500cf3dc8c36c611f2d5b41744e881c299e..0880b2c97ed7e2e6e9e4515c82c37aa4e0e91233 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -35,7 +35,6 @@ enum class PadAttr {
     BorderType,      ///< Type of border handling during padding.
     BorderValue      ///< Value to be used for constant padding.
 };
-
 /**
  * @enum PadBorderType
  * @brief Types of border handling available for padding.
@@ -48,6 +47,33 @@ enum class PadBorderType {
     Zero      ///< All out-of-bound values are set to 0.
 };
 
+} // namespace Aidge
+
+namespace {
+    /**
+     * @brief EnumStrings specialization for PadAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::PadAttr>::data[] = {
+        "begin_end_borders",
+        "border_type",
+        "border_value"
+    };
+
+/**
+ * @brief EnumStrings specialization for PadBorderType.
+ */
+template <>
+const char* const EnumStrings<Aidge::PadBorderType>::data[] = {
+    "Constant",
+    "Edge",
+    "Reflect",
+    "Wrap",
+    "Zero"
+};
+} // namespace
+
+namespace Aidge {
 /**
  * @class Pad_Op
  * @brief Implementation of the Pad operator.
@@ -64,14 +90,14 @@ enum class PadBorderType {
  * The operator supports various border handling techniques (e.g., constant padding, reflection, wrapping).
  *
  * ### Output Tensor Shape:
- * If the input tensor has a shape `[B, C, d1, d2, ..., dN]`, where `B` is the batch size, 
- * `C` is the number of channels, and `[d1, d2, ..., dN]` are the spatial dimensions, 
- * and the padding is defined by `beginEndTuples = {b1, e1, b2, e2, ..., bN, eN}`, 
+ * If the input tensor has a shape `[B, C, d1, d2, ..., dN]`, where `B` is the batch size,
+ * `C` is the number of channels, and `[d1, d2, ..., dN]` are the spatial dimensions,
+ * and the padding is defined by `beginEndTuples = {b1, e1, b2, e2, ..., bN, eN}`,
  * the output tensor shape will be:
- * 
+ *
  * `[B, C, d1 + b1 + e1, d2 + b2 + e2, ..., dN + bN + eN]`.
- * 
- * The padding values `b_i` and `e_i` specify the number of elements to add before and after 
+ *
+ * The padding values `b_i` and `e_i` specify the number of elements to add before and after
  * the corresponding spatial dimension `d_i`. Batch size and channel count remain unchanged.
  *
  * @example Constant Padding:
@@ -92,7 +118,7 @@ enum class PadBorderType {
  *    - Output tensor shape: `[B, C, 4 + 1 + 1, 5 + 2 + 2, 6 + 0 + 0] = [B, C, 6, 9, 6]`
  *    - Padding values mirror the existing tensor values.
  *
- * This operator is commonly used for image processing, extending spatial dimensions while maintaining 
+ * This operator is commonly used for image processing, extending spatial dimensions while maintaining
  * batch and channel consistency, or aligning tensor dimensions in machine learning workflows.
  */
 template <DimIdx_t DIM>
@@ -216,6 +242,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::PadAttr>::data;
+	}
 };
 
 /**
@@ -250,30 +284,6 @@ inline std::shared_ptr<Node> Pad(
 extern template class Aidge::Pad_Op<1>;
 extern template class Aidge::Pad_Op<2>;
 
-namespace {
-
-/**
- * @brief EnumStrings specialization for PadAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::PadAttr>::data[] = {
-    "begin_end_borders",
-    "border_type",
-    "border_value"
-};
-
-/**
- * @brief EnumStrings specialization for PadBorderType.
- */
-template <>
-const char* const EnumStrings<Aidge::PadBorderType>::data[] = {
-    "Constant",
-    "Edge",
-    "Reflect",
-    "Wrap",
-    "Zero"
-};
 
-}  // namespace
 
 #endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 2cf567329496e5f8a7745ab3461dc6f74d0ea1ba..d9d52f9bcd07a671d68e3db53c378c9ee6659c8e 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -101,7 +101,17 @@ enum class PopAttr {
     ForwardStep,    // Tracks the current step in the forward pass
     BackwardStep    // Tracks the current step in the backward pass
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of the `Pop` operator's attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::PopAttr>::data[] = {
+        "forward_step", "backward_step"
+    };
+}
+namespace Aidge {
 /**
  * @class Pop_Op
  * @brief The `Pop` operator is responsible for removing and outputting elements from a data structure.
@@ -211,6 +221,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::PopAttr>::data;
+	}
 };
 
 /**
@@ -221,14 +239,5 @@ public:
 std::shared_ptr<Node> Pop(const std::string& name = "");
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of the `Pop` operator's attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::PopAttr>::data[] = {
-    "forward_step", "backward_step"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_POP_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1d6b965820e90dcd9b54c61795358c5332d77efc..3690579d34373b64eec20042b7f9615266c15aee 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -35,25 +35,33 @@ namespace Aidge {
  * @brief Attributes specific to the `Producer_Op` class.
  */
 enum class ProdAttr { Constant };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief Enum string representation for `ProdAttr`.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::ProdAttr>::data[] = {"constant"};
+}
+namespace Aidge {
 /**
  * @class Producer_Op
  * @brief Represents an operator that stores a tensor in memory and provides it as an output.
- * 
- * The `Producer_Op` class is a specialized operator designed to store a tensor in memory 
- * and return it as an output tensor. It is typically used to store parameters or input 
- * values for a computational graph. A `Producer_Op` does not have any input data, parameters, 
- * or attributes, making it a fundamental building block for constant or initialized values 
+ *
+ * The `Producer_Op` class is a specialized operator designed to store a tensor in memory
+ * and return it as an output tensor. It is typically used to store parameters or input
+ * values for a computational graph. A `Producer_Op` does not have any input data, parameters,
+ * or attributes, making it a fundamental building block for constant or initialized values
  * within the graph.
- * 
+ *
  * Key characteristics of a `Producer_Op`:
  * - No inputs: The operator does not accept any input tensors.
  * - No parameters or attributes: It is solely responsible for producing an output tensor.
  * - Stores and returns a tensor: The stored tensor is accessible as the operator's output.
- * 
- * This operator is useful for scenarios where fixed or pre-initialized tensors need to 
+ *
+ * This operator is useful for scenarios where fixed or pre-initialized tensors need to
  * be introduced into a graph, such as weights, biases, or constant values.
- * 
+ *
  * @see OperatorTensor
  * @see Registrable
  */
@@ -77,7 +85,7 @@ public:
 
     /**
      * @brief Constructs a `Producer_Op` object with specific dimensions.
-     * 
+     *
      * @tparam DIM The number of dimensions for the tensor.
      * @param[in] dims Array defining the dimensions of the tensor.
      * @param[in] constant Indicates whether the tensor is constant.
@@ -87,7 +95,7 @@ public:
 
     /**
      * @brief Constructs a `Producer_Op` object from an existing tensor.
-     * 
+     *
      * @param[in] tensor A shared pointer to the tensor to be produced.
      * @param[in] constant Indicates whether the tensor should be constant.
      */
@@ -95,10 +103,10 @@ public:
 
     /**
      * @brief Copy constructor.
-     * 
-     * Copies the attributes and output tensors of the operator. 
+     *
+     * Copies the attributes and output tensors of the operator.
      * Input tensors are not copied, and the new operator will have no associated inputs.
-     * 
+     *
      * @param[in] op The `Producer_Op` object to copy.
      */
     Producer_Op(const Producer_Op& op);
@@ -106,28 +114,28 @@ public:
 public:
     /**
      * @brief Conversion operator to retrieve the output tensor.
-     * 
+     *
      * @return A shared pointer to the output tensor.
      */
     operator std::shared_ptr<Tensor>() const { return mOutputs[0]; }
 
     /**
      * @brief Clones the operator using the copy constructor.
-     * 
+     *
      * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
     /**
      * @brief Retrieves the dimensions of the output tensor.
-     * 
+     *
      * @return A vector containing the dimensions of the output tensor.
      */
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     /**
      * @brief Sets the backend for the operator's execution.
-     * 
+     *
      * @param[in] name The name of the backend.
      * @param[in] device The device index (default is 0).
      */
@@ -135,35 +143,35 @@ public:
 
     /**
      * @brief Retrieves the list of available backends for this operator.
-     * 
+     *
      * @return A set containing the names of available backends.
      */
     std::set<std::string> getAvailableBackends() const override;
 
     /**
      * @brief Retrieves the operator's attributes.
-     * 
+     *
      * @return A shared pointer to the operator's attributes.
      */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     /**
      * @brief Retrieves the constant attribute.
-     * 
+     *
      * @return A reference to the constant attribute.
      */
     inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
 
     /**
      * @brief Performs the forward operation for the operator.
-     * 
+     *
      * Generates the output tensor based on the defined attributes and configuration.
      */
     void forward() override final;
 
     /**
      * @brief Placeholder for the backward operation.
-     * 
+     *
      * This function logs a debug message, as `Producer_Op` typically does not support backpropagation.
      */
     void backward() override final {
@@ -172,12 +180,12 @@ public:
 
     /**
      * @brief Associates an input tensor with the operator.
-     * 
+     *
      * This operation is not supported by `Producer_Op` as it does not take inputs.
-     * 
+     *
      * @param[in] inputIdx The index of the input.
      * @param[in] data A shared pointer to the data to associate.
-     * 
+     *
      * @throws std::runtime_error Always throws, as inputs are not supported.
      */
     void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
@@ -186,35 +194,35 @@ public:
 
     /**
      * @brief Checks whether dimensions are forwarded.
-     * 
+     *
      * @return Always true for `Producer_Op`.
      */
     inline bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
     /**
      * @brief Confirms that dimensions have been forwarded.
-     * 
+     *
      * @return Always true for `Producer_Op`.
      */
     inline bool dimsForwarded() const noexcept override final { return true; }
 
     /**
      * @brief Retrieves the names of the inputs for the operator.
-     * 
+     *
      * @return An empty vector, as `Producer_Op` takes no inputs.
      */
     static const std::vector<std::string> getInputsName() { return {}; }
 
     /**
      * @brief Retrieves the names of the outputs for the operator.
-     * 
+     *
      * @return A vector containing the output name "data_output".
      */
     static const std::vector<std::string> getOutputsName() { return {"data_output"}; }
 
     /**
      * @brief Sets the output tensor for the operator.
-     * 
+     *
      * @param[in] outputIdx Index of the output to set.
      * @param[in] data A shared pointer to the data.
      */
@@ -223,12 +231,12 @@ public:
 
 /**
  * @brief Helper function to create a producer node with specified dimensions.
- * 
+ *
  * @tparam DIM The number of dimensions.
  * @param[in] dims Array defining the dimensions of the tensor.
  * @param[in] name Optional name for the node.
  * @param[in] constant Indicates whether the tensor should be constant.
- * 
+ *
  * @return A shared pointer to the created node.
  */
 template <std::size_t DIM>
@@ -236,11 +244,11 @@ std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM>& dims, const std
 
 /**
  * @brief Helper function with a C-style array for dimension deduction.
- * 
+ *
  * @param[in] dims C-style array defining the tensor dimensions.
  * @param[in] name Optional name for the node.
  * @param[in] constant Indicates whether the tensor should be constant.
- * 
+ *
  * @return A shared pointer to the created node.
  */
 template <std::size_t DIM>
@@ -257,12 +265,12 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode,
 
 /**
  * @brief Adds a producer node to another node with a C-style array.
- * 
+ *
  * @param[in] otherNode The node to associate with the producer.
  * @param[in] inputIdx The input index.
  * @param[in] dims C-style array defining the tensor dimensions.
  * @param[in] extension An extension string for the producer.
- * 
+ *
  * @return A shared pointer to the updated node.
  */
 template <std::size_t DIM>
@@ -272,12 +280,4 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOInde
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief Enum string representation for `ProdAttr`.
- */
-template <>
-const char* const EnumStrings<Aidge::ProdAttr>::data[] = {"constant"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 6aded36383fbb8c96cc29af12ce5a6020cbd8f32..3ee4a1bec40f7f6aa409308708bc3338174c652b 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -51,7 +51,16 @@ enum class ReduceMeanAttr {
    */
   NoopWithEmptyAxes
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {
+        "axes",
+        "keep_dims",
+        "noop_with_empty_axes"
+    };
+}
+namespace Aidge {
 /**
  * @class ReduceMean_Op
  * @brief Implements the ReduceMean operation to compute the mean of a tensor along specified axes.
@@ -165,6 +174,14 @@ public:
         return {"data_output"};
     }
 
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ReduceMeanAttr>::data;
+	}
+
     virtual ~ReduceMean_Op() noexcept;
 };
 
@@ -186,13 +203,5 @@ std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {
-    "axes",
-    "keep_dims",
-    "noop_with_empty_axes"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 5a3674b21bca674a966d39dc103cde60a4964071..adb58f895cf3fbfa67b84c518a7f6cedf09d1a19 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -52,6 +52,12 @@ enum class ReduceSumAttr {
   NoopWithEmptyAxes
 };
 
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+}
+namespace Aidge {
 /**
  * @class ReduceSum_Op
  * @brief Implements the ReduceSum operation to compute the sum of a tensor along specified axes.
@@ -100,7 +106,7 @@ public:
     /**
      * @brief constructor for ReduceSum op
      * @param[in] axes around which perform the operation
-     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and 
+     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and
      * if false we remove the dimension completely
      * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing
      * and if false, we reduce on all axes
@@ -170,6 +176,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ReduceSumAttr>::data;
+	}
 };
 
 /**
@@ -194,9 +208,4 @@ inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
 }
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index c170ad79e202158f78681855001ff46ba8167261..e69c42d4d98974e7bb00acbf17581cd56ada1331 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -53,21 +53,29 @@ enum class ReshapeAttr {
      * @brief The target shape for the output tensor.
      */
     Shape,
-    
+
     /**
      * @brief Whether zeros in the shape attribute are allowed.
-     * 
+     *
      * When true, zeros in the target shape retain the corresponding dimension size from the input tensor.
      */
     AllowZero
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for ReshapeAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = {"shape", "allow_zero"};
+}
+namespace Aidge {
 /**
  * @brief Description of Reshape operator that adjusts the shape of the input tensor.
  *
- * This operator reshapes the input tensor according to the specified target shape. 
- * If the target shape is not compatible with the input tensor's total number of elements, 
- * the operation will fail. If the `AllowZero` attribute is true, zeros in the target shape 
+ * This operator reshapes the input tensor according to the specified target shape.
+ * If the target shape is not compatible with the input tensor's total number of elements,
+ * the operation will fail. If the `AllowZero` attribute is true, zeros in the target shape
  * retain the corresponding dimensions from the input tensor.
  *
  * @example Input: Tensor of dimensions `[2, 3]` with `Shape = {3, 2}` results in a tensor with dimensions `[3, 2]`.
@@ -176,6 +184,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ReshapeAttr>::data;
+	}
 };
 
 /**
@@ -192,12 +208,5 @@ std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for ReshapeAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = {"shape", "allow_zero"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index c3c7838efc16a0d091f5f0422442225cef8a0ab5..37d42fcc861db42c991a6e7f4296d725d002aad5 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -39,7 +39,17 @@ enum class ResizeAttr {
     InterpolationMode,
     PaddingMode
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
+        "coordinate_transformation_mode",
+        "cubic_coeff_a",
+        "interpolation_mode",
+        "padding_mode"
+    };
+}
+namespace Aidge {
 /**
  * @brief Resize operator, will up/downscale a given tensor given the input.
  * @verbatim
@@ -191,6 +201,14 @@ class Resize_Op
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ResizeAttr>::data;
+	}
 };
 
 /**
@@ -222,13 +240,4 @@ Resize(std::vector<float> scale = std::vector<float>(),
 
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
-    "coordinateTransformationMode",
-    "cubicCoeffA",
-    "InterpolationMode",
-    "PaddingMode"
-};
-}
 #endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index b33fb584165efd7773c85249d713953e8303dc6b..fb342d34580092febaf3d1e63ea78247c3e8f77a 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -23,7 +23,7 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-// Caution: This operator is now deprecated and should no longer be used. 
+// Caution: This operator is now deprecated and should no longer be used.
 // It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
 
 namespace Aidge {
@@ -38,7 +38,7 @@ enum class ScalingAttr {
     /**
      * @brief Number of quantization bits.
      *
-     * Specifies the bit-width used for quantization. 
+     * Specifies the bit-width used for quantization.
      * For example, a value of `8` represents 8-bit quantization.
      */
     QuantizedNbBits,
@@ -51,12 +51,18 @@ enum class ScalingAttr {
      */
     IsOutputUnsigned
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char* const EnumStrings<Aidge::ScalingAttr>::data[]
+        = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
+}
+namespace Aidge {
 /**
  * @brief Description of a scaling operation to scale and quantize input tensors.
  *
- * The `Scaling_Op` class applies a scaling factor to the input tensor, quantizes 
- * the scaled values to a specified bit-width, and outputs either signed or unsigned integers 
+ * The `Scaling_Op` class applies a scaling factor to the input tensor, quantizes
+ * the scaled values to a specified bit-width, and outputs either signed or unsigned integers
  * based on the configuration.
  *
  * The input and output Tensors have the same dimensions.
@@ -94,7 +100,7 @@ public:
     /**
      * @brief Copy-constructor.
      * @param[in] op Scaling_Op to copy.
-     * @details Copies the operator attributes and its output tensor(s), but not its input tensors. 
+     * @details Copies the operator attributes and its output tensor(s), but not its input tensors.
      * The new operator has no associated input.
      */
     Scaling_Op(const Scaling_Op& op);
@@ -134,6 +140,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ScalingAttr>::data;
+	}
 };
 
 /**
@@ -151,10 +165,5 @@ std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
                                      const std::string& name = "");
 } // namespace Aidge
 
-namespace {
-template <>
-const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-    = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 609e354d57c2632f7ae5df53baf42c04843c8383..2a553fb827fc8a8d4b03fa06ebcd8825ae2ed64f 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -62,7 +62,15 @@ enum class ShapeAttr {
      */
     End
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for ShapeAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
+}
+namespace Aidge {
 /**
  * @brief Description of the operation of extracting the shape of a tensor.
  *
@@ -163,6 +171,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of C-string attribute names.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::ShapeAttr>::data;
+	}
 };
 
 /**
@@ -177,12 +193,6 @@ std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for ShapeAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
-}
+
 
 #endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index d32bc4fe2ec9846d4467316cace8f76e008dba2d..fa21b3d197551e54a95fe29dbb8e3f83d30865af 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -84,7 +84,12 @@ enum class SliceAttr {
      */
     Steps
 };
-
+} // namespace Aidge
+namespace {
+    template <>
+    const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
+}
+namespace Aidge{
 /**
  * @class Slice_Op
  * @brief Implements the Slice operation for extracting sub-tensors.
@@ -203,6 +208,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SliceAttr>::data;
+	}
 };
 
 /**
@@ -223,9 +236,4 @@ std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
 
 }  // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
-}
-
 #endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 290132690149d4087f2ef89f9f3d0b9af631fff4..86e1a57e70c4b7070b9af279980b2d5344a2f6f0 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -33,7 +33,15 @@ enum class SoftmaxAttr {
      */
     Axis
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for SoftmaxAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
+}
+namespace Aidge {
 /**
  * @brief Description of a Softmax operation on input Tensor along a specified axis.
  *
@@ -130,6 +138,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SoftmaxAttr>::data;
+	}
 };
 
 /**
@@ -143,12 +159,4 @@ std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
 
 } // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for SoftmaxAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 3c6b52d3c39ade1f73c0391f02964d0bba74148b..8b6acb06023f5f71cbb71b42281f21bda19caaed 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -65,7 +65,17 @@ enum class SplitAttr {
      */
     Split
 };
+} // namespace Aidge
 
+namespace {
+    /**
+     * @brief EnumStrings specialization for SplitAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::SplitAttr>::data[] = {"axis", "split"};
+}
+
+namespace Aidge {
 /**
  * @class Split_Op
  * @brief Implements the Split operation to divide a tensor into multiple sub-tensors along a specified axis.
@@ -173,6 +183,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output_0", "data_output_n"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SplitAttr>::data;
+	}
 };
 
 /**
@@ -191,12 +209,5 @@ std::shared_ptr<Node> Split(DimSize_t nbOutput,
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for SplitAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::SplitAttr>::data[] = {"axis", "split"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 5c966edaf27271da79f9950cdf007cfcf446dd8d..69fa9d493a321199ea2fddd61c7b769a668c6f42 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -48,7 +48,12 @@ enum class SqueezeAttr {
    */
   Axes
 };
-
+} // namespace Aidge
+namespace {
+  template <>
+  const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"axes"};
+}
+namespace Aidge {
 /**
  * @brief This operator has as purpose to remove dummy dimensions around given
  * axes.
@@ -142,6 +147,14 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"squeezed"};
   }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::SqueezeAttr>::data;
+	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
@@ -152,9 +165,4 @@ inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"Axes"};
-}
-
 #endif // AIDGE_CORE_OPERATOR_SQUEEZE_H_
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 71e4e780a1f1c2993ff05f09ef6aa92aab1986ee..21442844789f065cf0f127db4380f70c4618ca86 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -95,7 +95,15 @@ enum class StackAttr {
     ForwardStep,   // Tracks the current step in the forward pass.
     MaxElements    // Maximum number of elements that can be stacked.
 };
-
+}  // namespace Aidge
+namespace {
+    /**
+     * @brief String representations of the Stack operator's attributes.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
+}
+namespace Aidge {
 /**
  * @class StackOp
  * @brief The `Stack` operator performs a stacking operation over a sequence of input tensors.
@@ -212,6 +220,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::StackAttr>::data;
+	}
 };
 
 /**
@@ -223,12 +239,5 @@ public:
 std::shared_ptr<Node> Stack(std::uint32_t maxElements = 0, const std::string& name = "");
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief String representations of the Stack operator's attributes.
- */
-template <>
-const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_STACK_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index ab3b18e51d54e97d22d6c9006803d7e804064783..2619c5ea5d41407100b66f909d6f64176027f74c 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -54,13 +54,21 @@ public:
 enum class TransposeAttr {
     /**
      * @brief Order of the output dimensions relative to the input dimensions.
-     * 
+     *
      * If this attribute is empty, the dimensions of the input tensor will
      * be reversed.
      */
     OutputDimsOrder
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for TransposeAttr.
+     */
+    template <>
+    const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
+}
+namespace Aidge {
 /**
  * @brief Describes the operation of transposing the axes of a given tensor.
  *
@@ -166,6 +174,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::TransposeAttr>::data;
+	}
 };
 
 /**
@@ -180,12 +196,5 @@ std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder =
 
 }  // namespace Aidge
 
-namespace {
-/**
- * @brief EnumStrings specialization for TransposeAttr.
- */
-template <>
-const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 333413b1d8d6530b14469ec1f451e9acfeead286..d220807d6cd4ea2c57c152c9e8351bc48211d06e 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -71,13 +71,25 @@ enum class UnfoldAttr {
      */
     KernelDims
 };
-
+} // namespace Aidge
+namespace {
+    /**
+     * @brief EnumStrings specialization for UnfoldAttr.
+     */
+    template <>
+    const char* const EnumStrings<Aidge::UnfoldAttr>::data[] = {
+        "stride_dims",
+        "dilation_dims",
+        "kernel_dims"
+    };
+}
+namespace Aidge {
 /**
  * @brief Describes the operation of unfolding a tensor into sliding blocks.
- * 
+ *
  * The Unfold operator extracts sliding blocks from the input tensor along
  * specified dimensions, controlled by stride, dilation, and kernel size.
- * 
+ *
  * @tparam DIM Number of dimensions involved in the operation.
  *
  * @example Input: Tensor of dimensions `[1, 3, 32, 32]`, with `KernelDims = {3, 3}`,
@@ -199,6 +211,14 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A pointer to the array of attribute name strings.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::UnfoldAttr>::data;
+	}
 };
 
 /**
@@ -229,16 +249,5 @@ inline std::shared_ptr<Node> Unfold( DimSize_t const (&kernelDims)[DIM],
 
 extern template class Aidge::Unfold_Op<2>;
 
-namespace {
-/**
- * @brief EnumStrings specialization for UnfoldAttr.
- */
-template <>
-const char* const EnumStrings<Aidge::UnfoldAttr>::data[] = {
-    "stride_dims",
-    "dilation_dims",
-    "kernel_dims"
-};
-}
 
 #endif /* AIDGE_CORE_OPERATOR_UNFOLD_H_ */
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index c0710540576959b62bbdf235ff6ea15f9d18cacd..a78a986724d4b5ca06f611b82e057d13183c5015 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -47,7 +47,12 @@ enum class UnsqueezeAttr {
    */
   Axes
 };
-
+} // namespace Aidge
+namespace {
+  template <>
+  const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"axes"};
+}
+namespace Aidge {
 /**
  * @brief This operator has as purpose to add a dummy dimension around given
  * axis. Unsqueezing the 2nd dim of a tensor of dim (1,2,3,4) will result in a
@@ -140,6 +145,14 @@ public:
   static const std::vector<std::string> getOutputsName() {
     return {"unsqueezed"};
   }
+
+	/**
+	 * @brief Retrieves the names of the attributes for the operator.
+	 * @return A vector containing the attributes name.
+	 */
+	static const char* const* attributesName(){
+		return EnumStrings<Aidge::UnsqueezeAttr>::data;
+	}
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
@@ -150,9 +163,4 @@ inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"Axes"};
-}
-
 #endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
diff --git a/include/aidge/operator/WeightInterleaving.hpp b/include/aidge/operator/WeightInterleaving.hpp
index 315bb3e2dd163f23949ac09719a20c335e03c265..a8f8c3d743aaef11bea0bbc03c949907348a7d7c 100644
--- a/include/aidge/operator/WeightInterleaving.hpp
+++ b/include/aidge/operator/WeightInterleaving.hpp
@@ -30,10 +30,10 @@ namespace Aidge {
  * @brief WeightInterleaving operator Compresses the last dimension of a tensor by packing low-bitwidth values
  * (e.g., 2, 3, or 4 bits) into fewer bytes.
  *
- * The operator reduces the size of the last dimension based on the bitwidth (`nb_bits`), 
- * packing multiple values into each byte. For example, 4-bit values result in a halved last dimension, 
+ * The operator reduces the size of the last dimension based on the bitwidth (`nb_bits`),
+ * packing multiple values into each byte. For example, 4-bit values result in a halved last dimension,
  * while 2-bit values reduce it by a factor of 4.
- * 
+ *
  * The output tensor has the same shape as the input, except for the compressed last dimension.
  *
  * @see OperatorTensor
@@ -78,10 +78,10 @@ public:
 
     /**
      * @brief Calculates the required size for the 8-bits`compactData` vector.
-     * 
+     *
      * This function determines the minimum number of bytes needed in `compactData`
      * to store `dataSize` elements compacted to `nb_bits` bits each.
-     * 
+     *
      * @param dataSize The total number of elements in the input data array.
      * @param nb_bits The number of bits to use for each compacted element (from 1 to 7).
      * @return std::size_t The required size in bytes for `compactData`.
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
index 3de54afd7a669347cc2b272cff9b87cf152be09a..75f3257499fc2edf5007aaa51c1198d39182d880 100644
--- a/python_binding/operator/pybind_ArgMax.cpp
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -43,6 +43,14 @@ void init_ArgMax(py::module &m) {
     .def(py::init<std::int32_t, bool, bool>(), py::arg("axis"), py::arg("keep_dims"), py::arg("select_last_index"))
     .def_static("get_inputs_name", &ArgMax_Op::getInputsName)
     .def_static("get_outputs_name", &ArgMax_Op::getOutputsName)
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = ArgMax_Op::attributesName();
+		for (size_t i = 0; i < std::size(EnumStrings<ArgMaxAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     ;
   declare_registrable<ArgMax_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index e376bcffb6ee10449a8bca8cc89f26528865d291..6130fc2717b0505de41648e5d617b570f7feca5c 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -31,17 +31,17 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
   const std::string pyClassName("AvgPooling" + std::to_string(DIM) + "DOp");
   const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName);
-  
+
   py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor>(
         m, pyClassName.c_str(),
         py::multiple_inheritance(),
         R"mydelimiter(
         Initialize an AvgPooling operator for a tensor.
 
-        This operator performs average pooling on the input tensor using the specified kernel dimensions 
+        This operator performs average pooling on the input tensor using the specified kernel dimensions
         and stride dimensions.
 
-        :param kernel_dims: The size of the kernel (filter) applied during pooling. 
+        :param kernel_dims: The size of the kernel (filter) applied during pooling.
                              Specifies the dimensions of the kernel (e.g., [3, 3] for 2D pooling).
         :type kernel_dims: List[int]
         :param stride_dims: The stride of the pooling operation. Specifies how much the kernel moves in each step.
@@ -60,8 +60,17 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
             py::arg("stride_dims") = create_array<DimSize_t, DIM>(1),
             py::arg("dilations") = create_array<DimSize_t, DIM>(1),
             py::arg("ceil_mode") = false)
-    .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+    .def_static("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
+    .def_static("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = AvgPooling_Op<DIM>::attributesName();
+			for (size_t i = 0; i < std::size(EnumStrings<AvgPoolingAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 3339db0f2dbf7a82f2a0833bb468941d4dbef6c5..199ef813481e324c3dbbbfbe6db2dad125a213d1 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -42,6 +42,15 @@ void declare_BatchNormOp(py::module& m) {
             py::arg("training_mode"))
         .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = BatchNorm_Op<DIM>::attributesName();
+			for (size_t i = 0; i < std::size(EnumStrings<BatchNormAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type);
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
index b4f6c90e54e781b011459be6e8e6e252e7347b00..f2f4b223df788c27dc1378d8564c881b907901c4 100644
--- a/python_binding/operator/pybind_BitShift.cpp
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -35,7 +35,15 @@ void init_BitShift(py::module &m) {
         .def(py::init<BitShift_Op::BitShiftDirection>(), py::arg("direction"))
         .def("direction", &BitShift_Op::direction, "Get the direction of the bit shift (left or right).")
         .def_static("get_inputs_name", &BitShift_Op::getInputsName, "Get the names of the input tensors.")
-        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.");
+        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.")
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = BitShift_Op::attributesName();
+			for (size_t i = 0; i < std::size(EnumStrings<BitShiftAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		});
 
     // Enum binding under BitShiftOp class
     py::enum_<BitShift_Op::BitShiftDirection>(pyBitShiftOp, "BitShiftDirection")
diff --git a/python_binding/operator/pybind_Cast.cpp b/python_binding/operator/pybind_Cast.cpp
index 960a084ff063e6310a4526dd65e8dabb0e8f905a..1e0ad7f9b27b94016ff28d868f4a74a8e37fadf1 100644
--- a/python_binding/operator/pybind_Cast.cpp
+++ b/python_binding/operator/pybind_Cast.cpp
@@ -32,7 +32,15 @@ void init_Cast(py::module &m) {
         .def(py::init<DataType>(), py::arg("target_type"))
         .def("target_type", &Cast_Op::targetType, "Get the targeted type, output tensor data type")
         .def_static("get_inputs_name", &Cast_Op::getInputsName, "Get the names of the input tensors.")
-        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.");
+        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.")
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Cast_Op::attributesName();
+			for (size_t i = 0; i < std::size(EnumStrings<CastAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		});
 
     // Binding for the Cast function
     m.def("Cast", &Cast, py::arg("target_type"), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp
index 7c4563a98244e108d274ecb22562c497243fc1bc..a22a002d470261ba0ab88286891674c63a1cf691 100644
--- a/python_binding/operator/pybind_Clip.cpp
+++ b/python_binding/operator/pybind_Clip.cpp
@@ -1,59 +1,68 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <pybind11/pybind11.h>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Clip.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/utils/Types.h"
-
-namespace py = pybind11;
-namespace Aidge {
-
-void init_Clip(py::module& m) {
-    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance(),
-        R"mydelimiter(
-        Initialize a Clip operator.
-
-        :param min : Minimum clipping value. Default is the lowest possible float value.
-        :type min : :py:class:`float`
-        :param max : Maximum clipping value. Default is the highest possible float value.
-        :type max : :py:class:`float`
-        )mydelimiter")
-    .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
-    .def_static("get_inputs_name", &Clip_Op::getInputsName)
-    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
-    .def("min", &Clip_Op::min, py::return_value_policy::reference_internal)
-    .def("max", &Clip_Op::max, py::return_value_policy::reference_internal);
-
-    declare_registrable<Clip_Op>(m, "ClipOp");
-
-    m.def("Clip", &Clip, py::arg("name") = "",
-        py::arg("min") = std::numeric_limits<float>::lowest(),
-        py::arg("max") = std::numeric_limits<float>::max(),
-        R"mydelimiter(
-        ClipOp is a tensor operator that performs a clipping operation on tensor elements.
-        This class allows limiting tensor values to a specified range, defined by the `min` 
-        and `max` parameters. Values outside this range are replaced by the corresponding 
-        limit values. When `min` is greater than `max`, the clip operator sets all the 'input' values to the value of `max`.
-
-        :param min: Minimum clipping value.
-        :type min: :py:class:`float`
-        :param max: Maximum clipping value.
-        :type max: :py:class:`float`
-        :param name: Name of the node.
-        :type name: :py:class:`str`
-        )mydelimiter");
-}
-
-}  // namespace Aidge
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Clip.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Clip(py::module& m) {
+    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Clip operator.
+
+        :param min : Minimum clipping value. Default is the lowest possible float value.
+        :type min : :py:class:`float`
+        :param max : Maximum clipping value. Default is the highest possible float value.
+        :type max : :py:class:`float`
+        )mydelimiter")
+    .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
+    .def_static("get_inputs_name", &Clip_Op::getInputsName)
+    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Clip_Op::attributesName();
+			for (size_t i = 0; i < std::size(EnumStrings<ClipAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
+    .def("min", &Clip_Op::min, py::return_value_policy::reference_internal)
+    .def("max", &Clip_Op::max, py::return_value_policy::reference_internal);
+
+    declare_registrable<Clip_Op>(m, "ClipOp");
+
+    m.def("Clip", &Clip, py::arg("name") = "",
+        py::arg("min") = std::numeric_limits<float>::lowest(),
+        py::arg("max") = std::numeric_limits<float>::max(),
+        R"mydelimiter(
+        ClipOp is a tensor operator that performs a clipping operation on tensor elements.
+        This class allows limiting tensor values to a specified range, defined by the `min` 
+        and `max` parameters. Values outside this range are replaced by the corresponding 
+        limit values. When `min` is greater than `max`, the clip operator sets all the 'input' values to the value of `max`.
+
+        :param min: Minimum clipping value.
+        :type min: :py:class:`float`
+        :param max: Maximum clipping value.
+        :type max: :py:class:`float`
+        :param name: Name of the node.
+        :type name: :py:class:`str`
+        )mydelimiter");
+}
+
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 9e1b3de9e7b1f6bd8c84779196c1918294cedb18..236f1692263e94a8fdf4278f18f61d71e247e1df 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -24,30 +24,39 @@ void init_Concat(py::module& m) {
         R"mydelimiter(
         Initialize a Concat operator.
 
-        :param nb_inputs : The number of input tensors to concatenate.
-        :type nb_inputs : :py:class:`int`
-        :param axis : The axis along which to concatenate the tensors.
-        :type axis : :py:class:`int`
+        :param nb_inputs: The number of input tensors to concatenate.
+        :type nb_inputs: :py:class:`int`
+        :param axis: The axis along which to concatenate the tensors, default=0.
+        :type axis: :py:class:`int`
         )mydelimiter")
         .def(py::init<const IOIndex_t, const int>(),
              py::arg("nb_inputs"),
-             py::arg("axis"))
+             py::arg("axis") = 0)
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
         .def_static("get_outputs_name", &Concat_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Concat_Op::attributesName();
+			for (size_t i = 0; i < std::size(EnumStrings<ConcatAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Concat_Op::Type);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
-    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis"), py::arg("name") = "",
+    m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis") = 0, py::arg("name") = "",
         R"mydelimiter(
         Initialize a node containing a Concat operator.
 
-        :param nb_inputs : The number of input tensors to concatenate.
-        :type nb_inputs : :py:class:`int`
-        :param axis : The axis along which to concatenate the tensors.
-        :type axis : :py:class:`int`
-        :param name : Name of the node.
-        :type name : :py:class:`str`
+        :param nb_inputs: The number of input tensors to concatenate.
+        :type nb_inputs: :py:class:`int`
+        :param axis: The axis along which to concatenate the tensors.
+        :type axis: :py:class:`int`
+        :param name: Name of the node.
+        :type name: :py:class:`str`
         )mydelimiter");
 }
 
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index 07079d98301f0f778185d0fb70f6d38b18aec5e8..b185f2f80a70faab7cd5269d43ba695466449654 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -27,12 +27,20 @@ void init_ConstantOfShape(py::module &m) {
       R"mydelimiter(
       Initialize a ConstantOfShape operator.
 
-      :param value : Tensor with a given datatype that contains the value 
+      :param value : Tensor with a given datatype that contains the value
                      that will fill the output tensor.
       :type value : :py:class:`Tensor`
       )mydelimiter")
-      .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
-      .def("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
+      .def_static("get_inputs_name", &ConstantOfShape_Op::getInputsName)
+      .def_static("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
+      .def_static("attributes_name", []() {
+        std::vector<std::string> result;
+        auto attributes = ConstantOfShape_Op::attributesName();
+        for (size_t i = 0; i < std::size(EnumStrings<ConstantOfShapeAttr>::data); ++i) {
+          result.emplace_back(attributes[i]);
+        }
+        return result;
+      })
       .def("value", &ConstantOfShape_Op::value);
 
   m.def("ConstantOfShape", &ConstantOfShape, py::arg("value") = Tensor(0.f),
@@ -40,7 +48,7 @@ void init_ConstantOfShape(py::module &m) {
         R"mydelimiter(
         Initialize a node containing a ConstantOfShape operator.
 
-        :param value : Tensor with a given datatype that contains the value 
+        :param value : Tensor with a given datatype that contains the value
                        that will fill the output tensor.
         :type value : :py:class:`Tensor`
         :param name  : Name of the node.
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 6ab073be6a494e63bf045c40faed22fa17fafe8e..e65a74c0c65ae413e8f76a87e52644690634cfef 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -43,6 +43,15 @@ void declare_ConvOp(py::module &m) {
             py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
         .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Conv_Op<DIM>::attributesName();
+			for (size_t i = 0; i < std::size(EnumStrings<ConvAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
         .def_readonly_static("Type", &Conv_Op<DIM>::Type)
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 5e24431d7e7e230f04cb76237108a4efe97f117f..7ddbefd3dea69be8bedb750c5686e13811151c04 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -56,6 +56,15 @@ void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = ConvDepthWise_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ConvDepthWiseAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
   .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels)
   .def_readonly_static("Type", &ConvDepthWise_Op<DIM>::Type);
 
diff --git a/python_binding/operator/pybind_DepthToSpace.cpp b/python_binding/operator/pybind_DepthToSpace.cpp
index efb8a7406774a5b071e8ebc3bda69d6ec773b50a..d33386711784f64c97535194366522f04f76f39c 100644
--- a/python_binding/operator/pybind_DepthToSpace.cpp
+++ b/python_binding/operator/pybind_DepthToSpace.cpp
@@ -37,6 +37,15 @@ void declare_DepthToSpace(py::module &m) {
         }), py::arg("block_size"), py::arg("mode") = "CRD")
     .def_static("get_inputs_name", &DepthToSpace_Op::getInputsName)
     .def_static("get_outputs_name", &DepthToSpace_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = DepthToSpace_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<DepthToSpaceAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &DepthToSpace_Op::Type)
     .def("__repr__", [](DepthToSpace_Op& b) {
         return fmt::format("Operator(type='{}')", b.Type);
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index fed44a1e283649ab12eb7e57b599984caed764d5..6afeb42a71787146b773fd2e460da4db3228c1c1 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -44,6 +44,15 @@ void init_Gather(py::module& m) {
                 py::arg("gathered_shape"))
         .def_static("get_inputs_name", &Gather_Op::getInputsName)
         .def_static("get_outputs_name", &Gather_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Gather_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<GatherAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Gather_Op::Type);
 
     declare_registrable<Gather_Op>(m, "GatherOp");
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 3464941dda96a7ce0897f43b927c0ac3a79015c1..f4f0335fd11f2bc083dbc3d5b318818983949298 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -65,6 +65,15 @@ void declare_GridSampleOp(py::module &m) {
             py::arg("align_corners") = false)
         .def_static("get_inputs_name", &GridSample_Op::getInputsName)
         .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = GridSample_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<GridSampleAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &GridSample_Op::Type)
         ;
 
diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp
index cbc2502aac018927c544a57f343a6305ee2bd86f..b8d7f1d802701933a7c1b5be9dcc7d9163f770a4 100644
--- a/python_binding/operator/pybind_Heaviside.cpp
+++ b/python_binding/operator/pybind_Heaviside.cpp
@@ -37,6 +37,15 @@ void init_Heaviside(py::module &m) {
         .def(py::init<float>(), py::arg("value"))
         .def_static("get_inputs_name", &Heaviside_Op::getInputsName)
         .def_static("get_outputs_name", &Heaviside_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Heaviside_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<HeavisideAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Heaviside_Op::Type);
 
     declare_registrable<Heaviside_Op>(m, "HeavisideOp");
diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp
index bb04ed1c5d8ddf5ebade09e6bae07de454805119..f802152ba77f1506ac9d93284ecbe4a589b7de74 100644
--- a/python_binding/operator/pybind_LRN.cpp
+++ b/python_binding/operator/pybind_LRN.cpp
@@ -30,6 +30,15 @@ void init_LRN(py::module& m) {
         .def(py::init<std::int32_t>(), py::arg("size"))
         .def_static("get_inputs_name", &LRN_Op::getInputsName)
         .def_static("get_outputs_name", &LRN_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = LRN_Op::attributesName();
+				for (size_t i = 0; i < size(EnumStrings<LRNAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &LRN_Op::Type);
 
     m.def("LRN", &LRN, py::arg("size"), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 564fd90be04dd4639f7e00943533f190212d5808..ab81052d21e477a64a9f90766504741f4386730c 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -30,6 +30,15 @@ void init_LeakyReLU(py::module& m) {
         .def(py::init<float>(), py::arg("negative_slope"))
         .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
         .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = LeakyReLU_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<LeakyReLUAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &LeakyReLU_Op::Type);
 
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index bdbc1edd3cba67f6a7d703692a50f33355a8909e..953e56ebec8fc0a8d030f6cf9d79c9359848fa05 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -52,6 +52,15 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("ceil_mode"))
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+
+  .def_static("attributes_name", []() {
+    std::vector<std::string> result;
+    auto attributes = MaxPooling_Op<DIM>::attributesName();
+    for (size_t i = 0; i < size(EnumStrings<MaxPoolingAttr>::data); ++i) {
+      result.emplace_back(attributes[i]);
+    }
+    return result;
+  })
   .def_readonly_static("Type", &MaxPooling_Op<DIM>::Type);
   
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
index 3ac1122111aae1a9b7eb353399e46562ae51b0b1..f583602c95692ff6e6084ba510f109e1f7ba65f9 100644
--- a/python_binding/operator/pybind_Memorize.cpp
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -23,7 +23,15 @@ void init_Memorize(py::module& m) {
     py::class_<Memorize_Op, std::shared_ptr<Memorize_Op>, OperatorTensor>(m, "MemorizeOp", py::multiple_inheritance())
         .def(py::init<const std::uint32_t>(), py::arg("end_step"))
         .def_static("get_inputs_name", &Memorize_Op::getInputsName)
-        .def_static("get_outputs_name", &Memorize_Op::getOutputsName);
+        .def_static("get_outputs_name", &Memorize_Op::getOutputsName)
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Memorize_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<MemorizeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		});
 
     declare_registrable<Memorize_Op>(m, "MemorizeOp");
 
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 8058cd2a23c6c1bf91b44b347af9df57aac0635a..2b2cdea12fee04e88ccb715abebf9da768758de3 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -50,7 +50,7 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing a Padded Convolution operator.
 
-        This operator performs a convolution operation with explicit padding. It applies a 
+        This operator performs a convolution operation with explicit padding. It applies a
         kernel filter over an input tensor with specified stride and dilation settings.
 
         :param in_channels: Number of input channels.
@@ -92,8 +92,8 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
     R"mydelimiter(
         Initialize a Padded Convolution operator.
 
-        This function defines a convolution operation that includes explicit padding before 
-        applying the kernel. The padding allows control over output dimensions while maintaining 
+        This function defines a convolution operation that includes explicit padding before
+        applying the kernel. The padding allows control over output dimensions while maintaining
         receptive field properties.
 
         :param kernel_dims: The size of the convolutional kernel for each dimension.
@@ -135,8 +135,8 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing a Depthwise Padded Convolution operator.
 
-        This operator performs a depthwise convolution operation, where each input channel is 
-        convolved separately with a different kernel. The operation includes explicit padding, 
+        This operator performs a depthwise convolution operation, where each input channel is
+        convolved separately with a different kernel. The operation includes explicit padding,
         stride control, and dilation options.
 
         :param nb_channels: Number of input channels (also the number of output channels since depthwise convolution does not mix channels).
@@ -176,8 +176,8 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
     R"mydelimiter(
         Initialize a Depthwise Padded Convolution operator.
 
-        This function defines a depthwise convolution operation that includes explicit padding 
-        before applying the kernel. Depthwise convolution applies a separate filter to each 
+        This function defines a depthwise convolution operation that includes explicit padding
+        before applying the kernel. Depthwise convolution applies a separate filter to each
         input channel, preserving channel independence.
 
         :param kernel_dims: The size of the convolutional kernel for each dimension.
@@ -216,7 +216,7 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing a Padded Average Pooling operator.
 
-        This operator performs an average pooling operation with explicit padding. The output value 
+        This operator performs an average pooling operation with explicit padding. The output value
         is computed as the average of input values within a defined kernel window.
 
         :param kernel_dims: The size of the pooling kernel for each dimension.
@@ -255,7 +255,7 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
     R"mydelimiter(
         Initialize a Padded Average Pooling operator.
 
-        This function defines an average pooling operation with explicit padding before pooling is applied. 
+        This function defines an average pooling operation with explicit padding before pooling is applied.
         The operation computes the average of the elements inside each kernel window.
 
         :param kernel_dims: The size of the pooling kernel for each dimension.
@@ -296,7 +296,7 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing a Padded Max Pooling operator.
 
-        This operator performs a max pooling operation with explicit padding before pooling is applied. 
+        This operator performs a max pooling operation with explicit padding before pooling is applied.
         The output value is computed as the maximum of input values within a defined kernel window.
 
         :param kernel_dims: The size of the pooling kernel for each dimension.
@@ -335,7 +335,7 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
     R"mydelimiter(
         Initialize a Padded Max Pooling operator.
 
-        This function defines a max pooling operation with explicit padding before pooling is applied. 
+        This function defines a max pooling operation with explicit padding before pooling is applied.
         The operation computes the maximum of the elements inside each kernel window.
 
         :param kernel_dims: The size of the pooling kernel for each dimension.
@@ -364,8 +364,8 @@ void declare_LSTMOp(py::module &m) {
     R"mydelimiter(
         Initialize a node containing an LSTM (Long Short-Term Memory) operator.
 
-        The LSTM operator is a recurrent neural network (RNN) variant designed to model sequential data 
-        while addressing the vanishing gradient problem. It includes gating mechanisms to control 
+        The LSTM operator is a recurrent neural network (RNN) variant designed to model sequential data
+        while addressing the vanishing gradient problem. It includes gating mechanisms to control
         information flow through time.
 
         :param in_channels: The number of input features per time step.
@@ -388,7 +388,7 @@ void declare_LSTMOp(py::module &m) {
     R"mydelimiter(
         Initialize an LSTM operation.
 
-        This function sets up an LSTM operator to process sequential data. The LSTM maintains hidden 
+        This function sets up an LSTM operator to process sequential data. The LSTM maintains hidden
         states over time steps, allowing it to learn long-range dependencies.
 
         :param seq_length: The length of the input sequence.
@@ -402,7 +402,7 @@ void declare_LSTMOp(py::module &m) {
 
 
 void declare_LeakyOp(py::module &m) {
-    m.def("Leaky", &Leaky, 
+    m.def("Leaky", &Leaky,
           py::arg("nb_timesteps"),
           py::arg("beta"),
           py::arg("threshold") = 1.0,
@@ -410,7 +410,7 @@ void declare_LeakyOp(py::module &m) {
     R"mydelimiter(
         Initialize a Leaky neuron operator.
 
-        The Leaky operator introduces a decay factor, allowing neuron states to "leak" over time instead of resetting 
+        The Leaky operator introduces a decay factor, allowing neuron states to "leak" over time instead of resetting
         abruptly. This helps in maintaining temporal memory.
 
         :param nb_timesteps: The number of time steps for the operation.
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index fe899a75a5c0c8f74f4905388306835ffeed31ba..7b37bb20677f8c426adba6c84ac206aa94cc140b 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -50,6 +50,14 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Pad_Op<DIM>::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<PadAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Pad_Op<DIM>::Type);
 
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 2040f642bbfc0428be48a6f7ec21fa3aed20a371..20606d24df7716cc410a141971e569f960e472a8 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -23,6 +23,15 @@ void init_Pop(py::module& m) {
     .def(py::init<>())
     .def_static("get_inputs_name", &Pop_Op::getInputsName)
     .def_static("get_outputs_name", &Pop_Op::getOutputsName)
+
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = Pop_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<PopAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     .def_readonly_static("Type", &Pop_Op::Type);
 
     m.def("Pop", &Pop, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 028e45755fb10bb01602959f721cf003cb1e5136..d29f6bfe7aa2f5f44bbc407923dce5bc5968fcc3 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -43,6 +43,14 @@ void declare_ReduceMeanOp(py::module &m) {
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes") = std::vector<std::int32_t>(), py::arg("keep_dims") = true, py::arg("noop_with_empty_axes") = false)
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = ReduceMean_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<ReduceMeanAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     .def_readonly_static("Type", &ReduceMean_Op::Type)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
index eaa57ef1c663a03cfd59ce02c13c3c7028b69e01..f139f2e7b4ef1484430b814023296149734fd54a 100644
--- a/python_binding/operator/pybind_ReduceSum.cpp
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -43,6 +43,15 @@ void init_ReduceSum(py::module &m) {
     .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
     .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
+
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = ReduceSum_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<ReduceSumAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     ;
   declare_registrable<ReduceSum_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index e3244f5dd600e809bb428cd71bbad08348ec44ca..d263796ce016e4218807926781f6382b998f7e38 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -35,6 +35,15 @@ void init_Reshape(py::module& m) {
     .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
     .def_static("get_inputs_name", &Reshape_Op::getInputsName)
     .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Reshape_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ReshapeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Reshape_Op::Type);
 
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 2aa62609835a7042dd0df54f28b453b7e33a3b5b..10a60e1f947a98d0325c72096a287df5fbe77d77 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -25,10 +25,18 @@ namespace Aidge {
 void init_Resize(py::module &m) {
   py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
           m, "ResizeOp", py::multiple_inheritance())
-          .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
-          .def_static("get_inputs_name", &Resize_Op::getInputsName)
-          .def_static("get_outputs_name", &Resize_Op::getOutputsName)
-          .def_readonly_static("Type", &Resize_Op::Type);
+        .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
+        .def_static("get_inputs_name", &Resize_Op::getInputsName)
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Resize_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ResizeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
+        .def_readonly_static("Type", &Resize_Op::Type);
 
   declare_registrable<Resize_Op>(m, "ResizeOp");
 
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index c555bca8971fca4cc570741be5a0a7f5be266fa2..ba975bb0616131b045f3a3076ffc595f69d8aa90 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -41,6 +41,15 @@ void init_Scaling(py::module& m) {
              py::arg("is_output_unsigned"))
         .def_static("get_inputs_name", &Scaling_Op::getInputsName)
         .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Scaling_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ScalingAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Scaling_Op::Type);
 
     declare_registrable<Scaling_Op>(m, "ScalingOp");
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index cc7669a24b5c9febe41fea863a0966bfcdffc94e..3c8974bf0e572322dd4ddc0641f35b7ecbe7b56f 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -34,6 +34,15 @@ void init_Shape(py::module& m) {
         .def(py::init<const std::int64_t, const std::int64_t>(), py::arg("start"), py::arg("end"))
         .def_static("get_inputs_name", &Shape_Op::getInputsName)
         .def_static("get_outputs_name", &Shape_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Shape_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<ShapeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Shape_Op::Type);
 
     declare_registrable<Shape_Op>(m, "ShapeOp");
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index f01751b86a981f19f89e34c90780faeb6bd7e9b0..1cfd63f656f2fb9594dc6c4ee3a2591efa1ad25f 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -45,6 +45,15 @@ void init_Slice(py::module& m) {
                   py::arg("steps") = std::vector<std::int64_t>())
     .def_static("get_inputs_name", &Slice_Op::getInputsName)
     .def_static("get_outputs_name", &Slice_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Slice_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SliceAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Slice_Op::Type);
 
     declare_registrable<Slice_Op>(m, "SliceOp");
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 093f448e46be09ee1d77740efa9ed0cf70654737..7a4a687fd812c8d0366a435d2670a5e0110022f6 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -30,6 +30,15 @@ void init_Softmax(py::module& m) {
         .def(py::init<std::int32_t>(), py::arg("axis"))
         .def_static("get_inputs_name", &Softmax_Op::getInputsName)
         .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Softmax_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SoftmaxAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &Softmax_Op::Type);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index f02a699e44cb2afca131e13fbce415fabcd45b80..052fa277e400d0ca25d7c123384e84f6ad607628 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -36,6 +36,15 @@ void init_Split(py::module& m) {
             py::arg("split"))
     .def_static("get_inputs_name", &Split_Op::getInputsName)
     .def_static("get_outputs_name", &Split_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Split_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SplitAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
     .def_readonly_static("Type", &Split_Op::Type);
 
     declare_registrable<Split_Op>(m, "SplitOp");
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index ca90fb46af40189dbe66c320ecdd237470ffa112..7808c78da081f11875df2d3755506ecaccc03181 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -24,29 +24,38 @@ namespace Aidge {
 
 void init_Squeeze(py::module &m) {
   py::class_<Squeeze_Op, std::shared_ptr<Squeeze_Op>, OperatorTensor>(
-      m, "SqueezeOp", py::multiple_inheritance(),
-		R"mydelimiter(
-		Initialize squeeze operator
-		:param axes :   axes to squeeze between [-r;r-1] 
-						with r = input_tensor.nbDims()
-						& r in [-128 , 127]
-		:type axes : :py:class: List[Int]
-		)mydelimiter")
-      .def("get_inputs_name", &Squeeze_Op::getInputsName)
-      .def("get_outputs_name", &Squeeze_Op::getOutputsName)
-      .def("axes", &Squeeze_Op::axes);
-  // Here we bind the constructor of the Squeeze Node. We add an argument
-  // for each attribute of the operator (in here we only have 'axes') and
-  // the last argument is the node's name.
-  m.def("Squeeze", &Squeeze, py::arg("axes") = std::vector<int8_t>({}),
+    m, "SqueezeOp", py::multiple_inheritance(),
+    R"mydelimiter(
+    Initialize squeeze operator
+    :param axes:   axes to squeeze between [-r;r-1]
+                   with r = input_tensor.nbDims()
+                   & r in [-128 , 127]
+    :type axes: :py:class: List[Int]
+    )mydelimiter")
+    .def_static("get_inputs_name", &Squeeze_Op::getInputsName)
+    .def_static("get_outputs_name", &Squeeze_Op::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = Squeeze_Op::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<SqueezeAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
+    .def("axes", &Squeeze_Op::axes);
+
+    declare_registrable<Squeeze_Op>(m, "SqueezeOp");
+    m.def("Squeeze", &Squeeze, py::arg("axes") = std::vector<int8_t>({}),
         py::arg("name") = "",
         R"mydelimiter(
-    Initialize a node containing a squeeze operator.
-	:param axes :   axes to squeeze between [-r;r-1] 
-					with r = input_tensor.nbDims()
-					& r in [-128 , 127]
-	:type axes : :py:class: List[Int]
-    :param name : name of the node.
-)mydelimiter");
+            Initialize a node containing a squeeze operator.
+            :param axes:   axes to squeeze between [-r;r-1]
+                            with r = input_tensor.nbDims()
+                            & r in [-128 , 127]
+            :type axes: :py:class: List[Int]
+            :param name: name of the node.
+            :type name: str
+        )mydelimiter");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Stack.cpp b/python_binding/operator/pybind_Stack.cpp
index c9bd969faf714cacb0dbf44a0b0fe6e84281ffd8..026167446189de00de9e5f9dad8dbe794d010c61 100644
--- a/python_binding/operator/pybind_Stack.cpp
+++ b/python_binding/operator/pybind_Stack.cpp
@@ -26,6 +26,15 @@ void init_Stack(py::module &m) {
         .def(py::init<const std::uint32_t>(), py::arg("max_elements"))
         .def_static("get_inputs_name", &StackOp::getInputsName)
         .def_static("get_outputs_name", &StackOp::getOutputsName)
+
+		.def_static("attributes_name", []() {
+			std::vector<std::string> result;
+			auto attributes = StackOp::attributesName();
+			for (size_t i = 0; i < size(EnumStrings<StackAttr>::data); ++i) {
+				result.emplace_back(attributes[i]);
+			}
+			return result;
+		})
         .def_readonly_static("Type", &StackOp::s_type);
 
     m.def("Stack",
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 20794a15585529e10a83d2bf6fa7c18edfbde3fa..1882aa4c439b88413a3d9e94d4df0605bfec87a1 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -38,6 +38,14 @@ void declare_Transpose(py::module &m) {
     .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order")=std::vector<std::size_t>())
     .def_static("get_inputs_name", &Transpose_Op::getInputsName)
     .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
+	.def_static("attributes_name", []() {
+		std::vector<std::string> result;
+		auto attributes = Transpose_Op::attributesName();
+		for (size_t i = 0; i < size(EnumStrings<TransposeAttr>::data); ++i) {
+			result.emplace_back(attributes[i]);
+		}
+		return result;
+	})
     .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
   m.def("Transpose", &Transpose, py::arg("output_dims_order")=std::vector<std::size_t>(), py::arg("name") = "",
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index b61cb40cedbb5bfbc197c401454f205c737bc6ee..1ef94202cba1fe53e63a30780e95689526ec900a 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -23,26 +23,33 @@ void init_Unsqueeze(py::module &m) {
   py::class_<Unsqueeze_Op, std::shared_ptr<Unsqueeze_Op>, OperatorTensor>(
       m, "UnsqueezeOp", py::multiple_inheritance(),
       R"mydelimiter(
-		Initialize an unsqueeze operator.
-		:param axes :   axes to unsqueeze between [-r;r-1] 
-						with r = input_tensor.nbDims() + len(axes)
-		:type axes : :py:class: List[Int]
+            Initialize an unsqueeze operator.
+            :param axes:   axes to unsqueeze between [-r;r-1] with r = input_tensor.nbDims() + len(axes)
+            :type axes: :py:class: List[Int]
 		)mydelimiter")
       // Here we bind the methods of the Unsqueeze_Op that will want to access
-      .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
-      .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
-      .def("axes", &Unsqueeze_Op::axes);
-  // Here we bind the constructor of the Unsqueeze Node. We add an argument for
-  // each attribute of the operator (in here we only have 'axes') and the last
-  // argument is the node's name.
+      .def_static("get_inputs_name", &Unsqueeze_Op::getInputsName)
+      .def_static("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+      .def_static("attributes_name", []() {
+          std::vector<std::string> result;
+          auto attributes = Unsqueeze_Op::attributesName();
+          for (size_t i = 0; i < std::size(EnumStrings<UnsqueezeAttr>::data); ++i) {
+              result.emplace_back(attributes[i]);
+          }
+          return result;
+      })
+      .def_readonly_static("Type", &Unsqueeze_Op::Type)
+      ;
+
+  declare_registrable<Unsqueeze_Op>(m, "UnsqueezeOp");
+
   m.def("Unsqueeze", &Unsqueeze, py::arg("axes") = std::vector<int8_t>({}),
         py::arg("name") = "",
         R"mydelimiter(
-    Initialize a node containing an unsqueeze operator.
-	:param axes :   axes to unsqueeze between [-r;r-1] 
-					with r = input_tensor.nbDims() + len(axes)
-	:type axes : :py:class: List[Int]
-    :param name : name of the node.
-)mydelimiter");
-}
+            Initialize a node containing an unsqueeze operator.
+            :param axes:   axes to unsqueeze between [-r;r-1] with r = input_tensor.nbDims() + len(axes)
+            :type axes: :py:class: List[Int]
+            :param name: name of the node.
+        )mydelimiter");
+}
 } // namespace Aidge