diff --git a/include/aidge/data/DataFormat.hpp b/include/aidge/data/DataFormat.hpp
index 77be8680deeda422e6cf41e612eab21ecf80c5e1..4b3b949f007ed835fc2af0be15fc53284d188ce0 100644
--- a/include/aidge/data/DataFormat.hpp
+++ b/include/aidge/data/DataFormat.hpp
@@ -17,63 +17,84 @@
 
 #include "aidge/utils/logger/EnumString.hpp"
 
+#define PERM(...) { __VA_ARGS__ }
+
+//   (EnumName, StringLiteral, NumDims, Permutation (0-indexed), Description)
+#define LIST_DATAFORMAT_ATTR(X)                                                      \
+    X(Default, "Default",         0, PERM(), "Unspecified format: interpretation is implementation-dependent"), \
+    X(Any,         "Any",         0, PERM(), "Any format is valid"),                    \
+    X(NCHW,        "NCHW",        4, PERM(0, 1, 2, 3), "4D format: [batch][channel][height][width]"), \
+    X(NHWC,        "NHWC",        4, PERM(0, 2, 3, 1), "4D format: [batch][height][width][channel]"), \
+    X(CHWN,        "CHWN",        4, PERM(1, 2, 3, 0), "4D format: [channel][height][width][batch]"), \
+    X(NCDHW,       "NCDHW",       5, PERM(0, 1, 2, 3, 4), "5D format: [batch][channel][depth][height][width]"), \
+    X(NDHWC,       "NDHWC",       5, PERM(0, 2, 3, 4, 1), "5D format: [batch][depth][height][width][channel]"), \
+    X(CDHWN,       "CDHWN",       5, PERM(1, 2, 3, 4, 0), "5D format: [channel][depth][height][width][batch]")
+
+#define NB_DFORMAT 8
+
 namespace Aidge {
 
 /**
- * @brief Enumeration of supported tensor data layouts
+ * @brief Enumeration of supported tensor data layouts.
  *
- * Represents different memory layout formats for multi-dimensional tensors:
+ * Represents different memory layouts for multi-dimensional tensors.
+ * The dimensions typically represent:
  * - N: Batch size
  * - C: Channels
  * - H: Height
  * - W: Width
  * - D: Depth (for 3D tensors)
+ *
+ * The enum values are generated via the X-macro; NOTE(review): the enum order changed (Any is now second instead of last) — confirm no serialized data or ABI depends on the previous numeric values.
  */
-enum class DataFormat {
-    Default,    ///< Default format, implementation dependent
-    NCHW,      ///< 4D format: [batch][channel][height][width]
-    NHWC,      ///< 4D format: [batch][height][width][channel]
-    CHWN,      ///< 4D format: [channel][height][width][batch]
-    NCDHW,     ///< 5D format: [batch][channel][depth][height][width]
-    NDHWC,     ///< 5D format: [batch][depth][height][width][channel]
-    CDHWN,     ///< 5D format: [channel][depth][height][width][batch]
-    Any        ///< Unspecified format
+enum class DataFormat : int {
+#define X(enumName, str, nb, arr, desc) enumName
+    LIST_DATAFORMAT_ATTR(X)
+#undef X
 };
 
-using DataFormatTranspose = std::array<std::size_t, 5>;
-
 /**
- * @brief Dictionary of transpose operations between different formats
+ * @brief Alias for a fixed-size array representing a permutation.
  *
- * Contains permutation arrays to convert between different data formats.
- * @warning In this array only, dimension index starts at 1
- * (0 is reserved as default value).
+ * A DataFormatTranspose is an array of 5 size_t values that represents a permutation mapping
+ * (0-indexed) for dimension reordering. Only the first NumDims elements are valid.
  */
-constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
-    {},                 // Default
-    {1, 2, 3, 4},      // NCHW
-    {1, 3, 4, 2},      // NHWC
-    {2, 3, 4, 1},      // CHWN
-    {1, 2, 3, 4, 5},   // NCDHW
-    {1, 3, 4, 5, 2},   // NDHWC
-    {2, 3, 4, 5, 1}    // CDHWN
-}};
+using DataFormatTranspose = std::array<std::size_t, 5>;
 
 /**
- * @brief Get the permutation array for converting between data formats
+ * @brief Compute the permutation mapping to convert tensor data from a source format
+ *        to a destination format.
+ *
+ * This function performs the following steps:
+ *   1. If either format is DataFormat::Any, a runtime error is thrown.
+ *   2. If either format is DataFormat::Default or both formats are equal, the identity permutation is returned.
+ *   3. Otherwise, the function retrieves the permutation mappings (from default NCHW)
+ *      for both the source and destination formats.
+ *   4. It computes the inverse of the source mapping (from source to default ordering).
+ *   5. Finally, it composes the inverse with the destination mapping to produce the
+ *      permutation mapping from source to destination.
+ *
+ * @param src The source data format.
+ * @param dst The destination data format.
+ * @return DataFormatTranspose The computed permutation array (0-indexed).
  *
- * @param src Source data format
- * @param dst Destination data format
- * @return DataFormatTranspose Permutation array to achieve the format conversion
+ * @throws std::runtime_error if either src or dst is DataFormat::Any.
+ * @pre The source and destination formats must have the same number of dimensions.
  */
-DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst);
+DataFormatTranspose getPermutationMapping(const DataFormat& src, const DataFormat& dst);
 
 } // namespace Aidge
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::DataFormat>::data[]
-    = {"Default", "NCHW", "NHWC", "CHWN", "NCDHW", "NDHWC", "CDHWN", "Any"};
+struct EnumStrings<Aidge::DataFormat> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::DataFormat>::data[] = {
+#define X(EnumName, Str, NumDims, Perm, Desc) Str
+    LIST_DATAFORMAT_ATTR(X)
+#undef X
+};
 }
 
 namespace Aidge {
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 785caaa0e8959ba34d438913a4c0e5bad3df0f86..5df59becdc41f12768935544a42aac24ffb3a333 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -431,7 +431,7 @@ public:
      * @brief Get the data format enum.
      * @return constexpr DataFormat
      */
-    constexpr DataFormat dataFormat() const noexcept { return mDataFormat; }
+    constexpr DataFormat dataFormat() const noexcept { return mDataFormat; }
 
     /**
      * @brief Set the DataType of the Tensor and converts data
@@ -462,13 +462,13 @@ public:
      *                  data is copy-transposed.
      */
     void setDataFormat(const DataFormat df, bool copyTrans = true) {
-        if (!copyTrans || df == dataFormat()) {
+        if (!copyTrans || df == dataFormat() || df == DataFormat::Default || dataFormat() == DataFormat::Default) {
             mDataFormat = df;
             return;
         }
-    
-        const auto transpose = getDataFormatTranspose(dataFormat(), df);
-        
+
+        const auto transpose = getPermutationMapping(dataFormat(), df);
+
         if (mImpl) {
             copyTranspose(*this, transpose);
         } else {
@@ -476,7 +476,7 @@ public:
             for (std::size_t i = 0; i < dims().size(); ++i) {
                 newDims.push_back(dims()[transpose[i]]);
             }
-    
+
             std::vector<std::size_t> newStrides(dims().size(), 1);
             for (size_t i = 0; i < dims().size(); ++i) {
                 for (size_t j = i + 1; j < dims().size(); ++j) {
@@ -486,9 +486,10 @@ public:
             mDims = std::move(newDims);
             mStrides = std::move(newStrides);
         }
-    
+
         mDataFormat = df;
     }
+
     /**
      * @brief Get the Impl object
      * @return constexpr const std::shared_ptr<TensorImpl>&
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index d71d095e7f2c0c9bda4781f3efda3fb7954a2ed6..9b16f76d52e1a8d19a225d5ead2d1d47e465fd30 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -213,10 +213,7 @@ public:
    * @param inID
    * @return std::pair<std::shared_ptr<Node>, IOIndex_t>
    */
-  inline std::pair<NodePtr, IOIndex_t> input(const IOIndex_t inID) const {
-    AIDGE_ASSERT((inID != gk_IODefaultIndex) && (inID < nbInputs()), "Input index out of bound.");
-    return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
-  }
+  std::pair<std::shared_ptr<Node>, IOIndex_t> input(const IOIndex_t inID) const;
 
 
   /**
@@ -328,7 +325,7 @@ public:
    * Default to 0.
    * @param otherInId ID of the other Node input to connect to the current Node.
    * Default to the first available data input.
-   * 
+   *
    * @note otherNode shared_ptr is passed by refenrece in order to be able to detect
    * possible dangling connection situations in debug using ref counting.
    */
@@ -509,7 +506,7 @@ private:
    * @param otherNode
    * @param outId
    * @param otherInId
-   * 
+   *
    * @note otherNode shared_ptr is passed by refenrece in order to be able to detect
    * possible dangling connection situations in debug using ref counting.
    */
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index bc97e1f5bdd4dcc80857db55b66e9b6bedb1fa62..5057310d30b118ea6e9707e200ec90f020af62c2 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -25,29 +25,35 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
+#define LIST_ARGMAX_ATTR(X)                       \
+    X(Axis, "axis", std::int32_t),                \
+    X(KeepDims, "keep_dims", bool),               \
+    X(SelectLastIndex, "select_last_index", bool)
 
+namespace Aidge {
+/**
+ * @enum ArgMaxAttr
+ * @brief Attributes for the ArgMax operation.
+ *
+ * - Axis: Specifies the dimension along which the ArgMax operation is performed.
+ * - KeepDims: Indicates whether reduced dimensions should be kept or removed.
+ * - SelectLastIndex: Determines whether to select the first or last index in case of ties.
+ */
 enum class ArgMaxAttr {
-    /**
-     * @brief Specifies the dimension along which the ArgMax operation is performed.
-     */
-    Axis,
-    /**
-     * Indicates whether reduced dimensions should be kept or removed.
-     */
-    KeepDims,
-    /**
-     * Determines whether to select the first or last index in case of ties.
-     */
-    SelectLastIndex
+    GENERATE_LIST_ATTR_ENUM(LIST_ARGMAX_ATTR)
 };
 } // namespace Aidge
-/**
- * @brief Provides string representations for the ArgMaxAttr enumeration.
- */
+
 namespace {
-    template <>
-    const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
+template <>
+struct EnumStrings<Aidge::ArgMaxAttr> {
+    static const char* const data[];
+};
+/// @brief Provides string representations for the ArgMaxAttr enumeration.
+constexpr const char* const EnumStrings<Aidge::ArgMaxAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_ARGMAX_ATTR)
+};
+
 }
 namespace Aidge {
 /**
@@ -85,9 +91,8 @@ public:
 
 private:
     using Attributes_ = StaticAttributes<ArgMaxAttr,
-                                        std::int32_t,
-                                        bool,
-                                        bool>;
+                            GENERATE_LIST_ATTR_TYPE(LIST_ARGMAX_ATTR)
+                                        >;
     template <ArgMaxAttr e>
     using attr = typename Attributes_::template attr<e>;
     /// Pointer to the attribute storage.
@@ -190,7 +195,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::ArgMaxAttr>::data;
 	}
 };
@@ -214,6 +219,6 @@ std::shared_ptr<Node> ArgMax(std::int32_t axis = 0,
 
 }  // namespace Aidge
 
-
+#undef LIST_ARGMAX_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 6022d6a2a1459bbfa1844f6c6d300ed8232abed4..505a0639851245e4f66a7ccf4456d5a30dd55da3 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -23,42 +23,45 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+#define LIST_AVGPOOLING_ATTR(X)                     \
+    X(KernelDims, "kernel_dims", sizeArr_t<DIM>),   \
+    X(StrideDims, "stride_dims", sizeArr_t<DIM>),   \
+    X(Dilations,  "dilations",   sizeArr_t<DIM>),   \
+    X(CeilMode,   "ceil_mode",   bool)
+
 namespace Aidge {
+
 /**
- * @brief Attributes specific to the AvgPooling operation.
+ * @enum AvgPoolingAttr
+ * @brief Attributes defining the configuration of an AvgPooling Operator.
+ *
+ * - **KernelDims**: Kernel dimensions specifying the size of the pooling window for each spatial dimension.
+ *   Must be an array of positive integers. Common examples include [2,2] or [3,3].
+ * - **StrideDims**: Stride dimensions for sliding the pooling window across the input.
+ *   The stride specifies how much the window moves after each operation.
+ *   Must be an array of positive integers. For example, [1,1] or [2,2].
+ * - **Dilations**: Dilation along each spatial axis. Default value is 1 for all axes.
+ *   Must be an array of positive integers. For example, [1,1].
+ * - **CeilMode**: Flag indicating whether to use ceil or floor when calculating output size.
+ *   - `true`: Use `ceil` for output size calculation.
+ *   - `false`: Use `floor` for output size calculation.
  */
 enum class AvgPoolingAttr {
-    /**
-     * @brief Stride dimensions for sliding the pooling window.
-     * Specifies the step size of the sliding window along each spatial dimension.
-     */
-    StrideDims,
-    /**
-     * @brief Dilation along each spatial axis. Default value is 1.
-     */
-    Dilations,
-    /**
-     * @brief Kernel dimensions for the pooling operation.
-     * Specifies the size of the pooling window along each spatial dimension.
-     */
-    KernelDims,
-    /**
-     * @brief Flag indicating whether to use ceil or floor when calculating output size.
-     * - `true`: Use `ceil` for output size calculation.
-     * - `false`: Use `floor` for output size calculation.
-     */
-    CeilMode
+    GENERATE_LIST_ATTR_ENUM(LIST_AVGPOOLING_ATTR)
 };
 } // namespace Aidge
+
 namespace {
-    /**
-     * @brief String representation of the AvgPooling attributes.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
-        "stride_dims", "kernel_dims", "dilations", "ceil_mode"
-    };
+template <>
+struct EnumStrings<Aidge::AvgPoolingAttr> {
+    static const char* const data[];
+};
+/// @brief String representation of the AvgPooling attributes.
+constexpr const char* const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_AVGPOOLING_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief Class representing an Average Pooling operation.
@@ -104,10 +107,8 @@ private:
      * @brief Static attributes representing kernel and stride dimensions.
      */
     using Attributes_ = StaticAttributes<AvgPoolingAttr,
-                                             std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>,
-                                             bool>;
+                            GENERATE_LIST_ATTR_TYPE(LIST_AVGPOOLING_ATTR)
+                                             >;
     template <AvgPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
 
@@ -238,7 +239,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::AvgPoolingAttr>::data;
 	}
 };
@@ -290,4 +291,6 @@ extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
 extern template class Aidge::AvgPooling_Op<4>;
 
+#undef LIST_AVGPOOLING_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 3521c9b16dcbbf73b0c3c4aea9d93047dc0a2f61..81a679502c3ba74747fc7d7612293d4af46acc4f 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -22,39 +22,39 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
+#define LIST_BATCHNORM_ATTR(X)              \
+    X(Epsilon, "epsilon", float),           \
+    X(Momentum, "momentum", float),         \
+    X(TrainingMode,  "training_mode", bool)
 
+namespace Aidge {
+/**
+ * @enum BatchNormAttr
+ * @brief Attributes for the Batch Normalization operation.
+ *
+ * - Epsilon: A small value added to the denominator to ensure numerical stability and avoid division by zero.
+ * - Momentum: Controls the weighting of past running averages in batch normalization statistics.
+ *   - `0.0`: Full reliance on current batch statistics.
+ *   - `1.0`: Complete reliance on the previous moving average.
+ * - TrainingMode: Determines whether the operator is in training mode.
+ *   - `true`: Uses the current batch statistics for normalization.
+ *   - `false`: Uses moving average statistics accumulated during training.
+ */
 enum class BatchNormAttr {
-  /**
-   * @brief Epsilon value to avoid division by zero during normalization.
-   *
-   * A small value added to the denominator during normalization to ensure numerical stability.
-   * Commonly used in batch normalization to avoid very small variance values.
-   */
-  Epsilon,
-
-  /**
-   * @brief Momentum factor for the moving average of batch statistics.
-   *
-   * Controls the weighting of past running averages in the batch normalization statistics.
-   * - `0.0`: Full reliance on current batch statistics.
-   * - `1.0`: Complete reliance on the previous moving average.
-   */
-  Momentum,
-
-  /**
-   * @brief Flag indicating whether the operator is in training mode.
-   *
-   * - `true`: Uses the current batch statistics for normalization.
-   * - `false`: Uses moving average statistics accumulated during training.
-   */
-  TrainingMode
+    GENERATE_LIST_ATTR_ENUM(LIST_BATCHNORM_ATTR)
 };
 } // namespace Aidge
+
 namespace {
-    template <>
-    const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
+template <>
+struct EnumStrings<Aidge::BatchNormAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::BatchNormAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_BATCHNORM_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @class BatchNorm_Op
@@ -83,8 +83,9 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<BatchNormAttr, float, float, bool>;
-
+    using Attributes_ = StaticAttributes<BatchNormAttr,
+                            GENERATE_LIST_ATTR_TYPE(LIST_BATCHNORM_ATTR)
+                        >;
     template <BatchNormAttr e>
     using attr = typename Attributes_::template attr<e>;
 
@@ -162,7 +163,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::BatchNormAttr>::data;
 	}
 };
@@ -183,4 +184,6 @@ extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const bool, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const bool, const std::string&);
 
+#undef LIST_BATCHNORM_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_BATCHNORM_H_ */
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index 3e9f8c3f22728afc4fae7abf5f60adc13c89ac76..c54d6a99fc2f945a02396af446d356004e94efc1 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -23,23 +23,25 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/StaticAttributes.hpp"
 
-namespace Aidge {
-
+#define LIST_BITSHIFT_ATTR(X) X(BitShiftdirection, "bit_shift_direction", BitShiftDirection)
 
+namespace Aidge {
 enum class BitShiftAttr {
-    /**
-     *
-     */
-    BitShiftdirection
+    GENERATE_LIST_ATTR_ENUM(LIST_BITSHIFT_ATTR)
 };
-}
+}  // namespace Aidge
+
 namespace {
-    /**
-     * @brief Specialization of `EnumStrings` for `BitShiftAttr`.
-     */
-    template <>
-    const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {"bit_shift_direction"};
+/// @brief Specialization of `EnumStrings` for `BitShiftAttr`.
+template <>
+struct EnumStrings<Aidge::BitShiftAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::BitShiftAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_BITSHIFT_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @class BitShift_Op
@@ -71,7 +73,9 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<BitShiftAttr, BitShiftDirection>;
+    using Attributes_ = StaticAttributes<BitShiftAttr,
+                            GENERATE_LIST_ATTR_TYPE(LIST_BITSHIFT_ATTR)
+                        >;
 
     template <BitShiftAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -160,7 +164,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::BitShiftAttr>::data;
 	}
 };
@@ -177,6 +181,6 @@ inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direc
 
 } // namespace Aidge
 
-
+#undef LIST_BITSHIFT_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index b2ffbb553ce44f66f371a65f35340193bf04dab4..f003e30c30ce8bfa7a33d65b75bb83fc8ec17d93 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -30,20 +30,28 @@ public:
     void forward() override;
 };
 
+#define LIST_CAST_ATTR(X)  \
+    X(TargetType, "target_type", DataType)
+
 /**
  * @enum CastAttr
  * @brief Enum class defining the attributes for the Cast operator.
+ *
+ * - TargetType: Specifies the target data type for the cast operation.
  */
 enum class CastAttr {
-    /**
-     * @brief Target data type for the cast operation.
-     */
-    TargetType
+    GENERATE_LIST_ATTR_ENUM(LIST_CAST_ATTR)
 };
 } // namespace Aidge
+
 namespace {
-    template <>
-    const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
+template <>
+struct EnumStrings<Aidge::CastAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::CastAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_CAST_ATTR)
+};
 }
 namespace Aidge {
 /**
@@ -67,7 +75,9 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<CastAttr, DataType>;
+    using Attributes_ = StaticAttributes<CastAttr,
+        GENERATE_LIST_ATTR_TYPE(LIST_CAST_ATTR)
+    >;
 
     template <CastAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -147,7 +157,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::CastAttr>::data;
 	}
 };
@@ -162,4 +172,6 @@ std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name =
 
 }  // namespace Aidge
 
+#undef LIST_CAST_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 51ecb6eb36591c2e22ea47ba529b87d125c92a65..4d5d2a93c3434c53b48d718b0b908edc3f402dcb 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -23,23 +23,32 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
 
+#define LIST_CLIP_ATTR(X)  \
+    X(Min, "min", float),  \
+    X(Max, "max", float)
+
+namespace Aidge {
 /**
  * @enum ClipAttr
  * @brief Enum class defining the attributes for the Clip operator.
+ *
+ * - Min: Minimum value for clipping.
+ * - Max: Maximum value for clipping.
  */
 enum class ClipAttr {
-    Min,  /**< Minimum value for clipping. */
-    Max   /**< Maximum value for clipping. */
+    GENERATE_LIST_ATTR_ENUM(LIST_CLIP_ATTR)
 };
-}
+}  // namespace Aidge
+
 namespace {
-    /**
-     * @brief Specialization of EnumStrings for ClipAttr.
-     */
-    template <>
-    const char* const EnumStrings<Aidge::ClipAttr>::data[] = { "min", "max" };
+template <>
+struct EnumStrings<Aidge::ClipAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ClipAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_CLIP_ATTR)
+};
 }
 
 namespace Aidge {
@@ -69,7 +78,9 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ClipAttr, float, float>;
+    using Attributes_ = StaticAttributes<ClipAttr,
+        GENERATE_LIST_ATTR_TYPE(LIST_CLIP_ATTR)
+    >;
 
     template <ClipAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -162,7 +173,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::ClipAttr>::data;
 	}
 };
@@ -182,4 +193,6 @@ std::shared_ptr<Aidge::Node> Clip(
 
 } // namespace Aidge
 
+#undef LIST_CLIP_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_CLIP_H_ */
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 1f8a357a830ef3bf3d945ea488425128ea99d3ed..3e5efb5f9bbdecb248d6fd9ab647469955aa0214 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -49,25 +49,34 @@ public:
      */
     void forward() override;
 };
+}  // namespace Aidge
 
+
+#define LIST_CONCAT_ATTR(X)  \
+    X(Axis, "axis", std::int32_t)
+
+namespace Aidge {
+/**
+ * @enum ConcatAttr
+ * @brief Attributes for the Concat operation.
+ *
+ * - Axis: index of dimension along which the input tensors are concatenated.
+ */
 enum class ConcatAttr {
-    /**
-     * @brief Axis along which to concat the input tensor.
-     *
-     * The specified axis determines the direction of concatenating.
-     */
-    Axis
+    GENERATE_LIST_ATTR_ENUM(LIST_CONCAT_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-    /**
-     * @brief Specialization of EnumStrings for ConcatAttr.
-     */
-    template <>
-    const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
-        "axis"
-    };
+template <>
+struct EnumStrings<Aidge::ConcatAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_CONCAT_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @class Concat_Op
@@ -99,7 +108,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>;
+    using Attributes_ = StaticAttributes<ConcatAttr, GENERATE_LIST_ATTR_TYPE(LIST_CONCAT_ATTR)>;
 
     template <ConcatAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -184,7 +193,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::ConcatAttr>::data;
 	}
 };
@@ -200,4 +209,6 @@ std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0,
 
 } // namespace Aidge
 
+#undef LIST_CONCAT_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_CONCAT_H_ */
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index e78fba12ec89be456da0aca25c9bb15e170bdede..886df95a8f2478f84eae1f5548d558b2e5c4649b 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -12,41 +12,45 @@
 #ifndef AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
 #define AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
 
-#include <cstdint>
-#include <cstdlib>
-#include <functional>
-#include <limits>
 #include <memory>
+#include <functional>
+#include <set>
 #include <string>
-#include <vector>
 
-#include "aidge/data/Data.hpp"
-#include "aidge/graph/Node.hpp"
-#include "aidge/operator/Operator.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Types.h"
 
-namespace Aidge {
 
+#define LIST_CONSTANTOFSHAPE_ATTR(X)  \
+    X(Value, "value", Tensor)
+
+namespace Aidge {
+/**
+ * @enum ConstantOfShapeAttr
+ * @brief Attributes for the ConstantOfShape operation.
+ *
+ * - Value: A scalar tensor that holds a fixed datatype value to fill the output tensor.
+ */
 enum class ConstantOfShapeAttr {
-  /**
-   * @brief value to fill the output tensor with.
-   * Its a scalar tensor holding a value with a fixed datatype
-   */
-  Value,
+    GENERATE_LIST_ATTR_ENUM(LIST_CONSTANTOFSHAPE_ATTR)
 };
-} // namespace Aidge
-namespace {
-  template <>
-  const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"value"};
- } //namespace
+}  // namespace Aidge
 
-  namespace Aidge {
+namespace {
+template <>
+struct EnumStrings<Aidge::ConstantOfShapeAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_CONSTANTOFSHAPE_ATTR)
+};
+}
 
+namespace Aidge {
 /**
  * @brief This operator's purpose is to generate a tensor of shape given via
  * input and filled with a given value set via attribute.
@@ -62,7 +66,7 @@ public:
   static const std::string Type;
 
 private:
-  using Attributes_ = StaticAttributes<ConstantOfShapeAttr, Tensor>;
+  using Attributes_ = StaticAttributes<ConstantOfShapeAttr, GENERATE_LIST_ATTR_TYPE(LIST_CONSTANTOFSHAPE_ATTR)>;
   template <ConstantOfShapeAttr e>
   using attr = typename Attributes_::template attr<e>;
   const std::shared_ptr<Attributes_> mAttributes;
@@ -119,18 +123,20 @@ public:
     return mAttributes->template getAttr<ConstantOfShapeAttr::Value>();
   }
 
-  static const std::vector<std::string> getInputsName() { return {"input"}; }
-  static const std::vector<std::string> getOutputsName() {
-    return {"constant_of_shape"};
-  }
+    static const std::vector<std::string> getInputsName() {
+        return {"input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"constant_of_shape"};
+    }
 
-	/**
-	 * @brief Retrieves the names of the attributes for the operator.
-	 * @return A vector containing the attributes name.
-	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ConstantOfShapeAttr>::data;
-	}
+    /**
+     * @brief Retrieves the names of the attributes for the operator.
+     * @return A vector containing the attributes name.
+     */
+    static constexpr const char* const* attributesName() noexcept {
+        return EnumStrings<Aidge::ConstantOfShapeAttr>::data;
+    }
 };
 
 // helper with C-style array instead of std::array for kernel_dims to allow
@@ -142,5 +148,6 @@ inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
 }
 } // namespace Aidge
 
-#endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
+#undef LIST_CONSTANTOFSHAPE_ATTR
 
+#endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index f9c9109282cb90dadfa9b26d6f830faf9fdecd7c..283d0136e50e7c051061acb5274e98368b3d26a2 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -13,43 +13,37 @@
 #define AIDGE_CORE_OPERATOR_CONV_H_
 
 #include <array>
-#include <cmath>    // std::floor
 #include <cstddef>  // std::size_t
 #include <string>
 #include <utility>  // std::pair
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
 
+#define LIST_CONV_ATTR(X)                            \
+    X(KernelDims, "kernel_dims", sizeArr_t<DIM>),    \
+    X(StrideDims, "stride_dims", sizeArr_t<DIM>),    \
+    X(DilationDims, "dilation_dims", sizeArr_t<DIM>)
+
+namespace Aidge {
 /**
- * @enum ConvAttr
+ * @enum ConvAttr
  * @brief Attributes used for the Convolution operation.
+ *
+ * - KernelDims: The kernel dimensions.
+ * - StrideDims: The stride dimensions.
+ * - DilationDims: The dilation dimensions.
  */
 enum class ConvAttr {
-    StrideDims,     // The stride dimensions
-    DilationDims,   // The dilation dimensions
-    KernelDims      // The kernel dimensions
+    GENERATE_LIST_ATTR_ENUM(LIST_CONV_ATTR)
 };
-} // namespace Aidge
-namespace {
-    template <>
-    const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
-        "stride_dims",
-        "dilation_dims",
-        "kernel_dims"
-    };
-}
-namespace Aidge {
+
 /**
  * @class Conv_Op
  * @brief Convolution operator for performing a multi-dimensional convolution.
@@ -85,15 +79,13 @@ class Conv_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-private:
-    using Attributes_ = StaticAttributes<ConvAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>>;
+    // Use the external enum so that Aidge::Conv_Op<DIM>::Attr is valid.
+    using Attr = ConvAttr;
 
-    template <ConvAttr e>
+private:
+    using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_CONV_ATTR)>;
+    template <Attr e>
     using attr = typename Attributes_::template attr<e>;
-
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -110,9 +102,9 @@ public:
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
-            attr<ConvAttr::StrideDims>(strideDims),
-            attr<ConvAttr::DilationDims>(dilationDims),
-            attr<ConvAttr::KernelDims>(kernelDims)))
+            attr<Attr::StrideDims>(strideDims),
+            attr<Attr::DilationDims>(dilationDims),
+            attr<Attr::KernelDims>(kernelDims)))
     {}
 
     /**
@@ -168,30 +160,14 @@ public:
      * @return The number of input channels.
      * @throws std::runtime_error If the operator has no associated weight tensor.
      */
-    DimSize_t inChannels() const {
-        if (!getInput(1)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
-        }
-        
-        // check format
-        if(getInput(1)->dataFormat()==Aidge::DataFormat::NHWC) 
-            return getInput(1)->template dims<DIM+2>()[DIM+1];
-        // default format is NCHW
-        return getInput(1)->template dims<DIM+2>()[1];
-    }
+    DimSize_t inChannels() const;
 
     /**
      * @brief Get the number of output channels.
      * @return The number of output channels.
      * @throws std::runtime_error If the operator has no associated weight tensor.
      */
-    DimSize_t outChannels() const {
-        if (!getInput(1)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
-        }
-        // first weight dimension for both NCHW (Cout,Cin,H,W) and NHWC (Cout,H,W,Cin) data format
-        return getInput(1)->template dims<DIM+2>()[0];
-    }
+    DimSize_t outChannels() const;
 
     /**
      * @brief Get the attributes of the operator.
@@ -203,19 +179,19 @@ public:
      * @brief Get the stride dimensions.
      * @return The stride dimensions as a reference.
      */
-    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<Attr::StrideDims>(); }
 
     /**
      * @brief Get the dilation dimensions.
      * @return The dilation dimensions as a reference.
      */
-    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<Attr::DilationDims>(); }
 
     /**
      * @brief Get the kernel dimensions.
      * @return The kernel dimensions as a reference.
      */
-    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<Attr::KernelDims>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
@@ -229,9 +205,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ConvAttr>::data;
-	}
+	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -266,22 +240,35 @@ std::shared_ptr<Node> Conv(DimSize_t inChannels,
  * based on the kernel dimensions provided.
  */
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> Conv(
+std::shared_ptr<Node> Conv(
     DimSize_t inChannels,
     DimSize_t outChannels,
     DimSize_t const (&kernelDims)[DIM],
     const std::string& name = "",
     const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-    bool noBias = false) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
-}
+    bool noBias = false);
 
 }  // namespace Aidge
 
+namespace {
+template <>
+struct EnumStrings<Aidge::ConvAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ConvAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_CONV_ATTR)
+};
+}
+
+template <Aidge::DimIdx_t DIM>
+constexpr const char* const* Aidge::Conv_Op<DIM>::attributesName() {
+    return EnumStrings<Aidge::Conv_Op<DIM>::Attr>::data;
+}
+
 extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;
 
+#undef LIST_CONV_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index b307d67a61cabd416bb96db8558fb6960cd65cc4..341b6f76647059e94613feb0b87dfb3a0187d875 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -28,21 +28,35 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+#define LIST_CONVDEPTHWISE_ATTR(X)                   \
+    X(KernelDims, "kernel_dims", sizeArr_t<DIM>),    \
+    X(StrideDims, "stride_dims", sizeArr_t<DIM>),    \
+    X(DilationDims, "dilation_dims", sizeArr_t<DIM>)
+
 namespace Aidge {
+/**
+ * @enum ConvDepthWiseAttr
+ * @brief Attributes used for the depthwise Convolution operation.
+ *
+ * - KernelDims: The kernel dimensions.
+ * - StrideDims: The stride dimensions.
+ * - DilationDims: The dilation dimensions.
+ */
 enum class ConvDepthWiseAttr {
-    StrideDims,   // The stride dimensions for the convolution.
-    DilationDims, // The dilation dimensions for the convolution.
-    KernelDims    // The kernel dimensions for the convolution.
+    GENERATE_LIST_ATTR_ENUM(LIST_CONVDEPTHWISE_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-    template <>
-    const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
-        "stride_dims",
-        "dilation_dims",
-        "kernel_dims"
-    };
+template <>
+struct EnumStrings<Aidge::ConvDepthWiseAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_CONVDEPTHWISE_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @class ConvDepthWise_Op
@@ -72,9 +86,8 @@ public:
 
 private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>>;
+                            GENERATE_LIST_ATTR_TYPE(LIST_CONVDEPTHWISE_ATTR)
+                        >;
 
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -203,7 +216,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::ConvDepthWiseAttr>::data;
 	}
 };
@@ -254,4 +267,6 @@ inline std::shared_ptr<Node> ConvDepthWise(
 extern template class Aidge::ConvDepthWise_Op<1>;
 extern template class Aidge::ConvDepthWise_Op<2>;
 
+#undef LIST_CONVDEPTHWISE_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index c99f7bbb7d882300b7f2f4278dda832189064ad5..7bf6ffdf3ad63986049558374afff642a71fc549 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -42,20 +42,35 @@ public:
      */
     void forward() override;
 };
+}  // namespace Aidge
+
+#define LIST_DEPTHTOSPACE_ATTR(X)               \
+    X(BlockSize, "block_size", std::uint32_t),    \
+    X(Mode, "mode", Aidge::DepthToSpace_Op::Mode)
 
+namespace Aidge {
 /**
  * @enum DepthToSpaceAttr
  * @brief Attributes for the DepthToSpace operation.
+ *
+ * - BlockSize: The block size for rearranging depth to spatial dimensions.
+ * - Mode: The mode for depth-to-space transformation.
  */
 enum class DepthToSpaceAttr {
-    BlockSize, /**< The block size for rearranging depth to spatial dimensions. */
-    Mode       /**< The mode for depth-to-space transformation. */
+    GENERATE_LIST_ATTR_ENUM(LIST_DEPTHTOSPACE_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-    template <>
-    const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
+template <>
+struct EnumStrings<Aidge::DepthToSpaceAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_DEPTHTOSPACE_ATTR)
+};
 }
+
 namespace Aidge{
 /**
  * @class DepthToSpace_Op
@@ -92,7 +107,7 @@ public:
     enum class Mode { DCR, CRD };
 
 private:
-    using Attributes_ = StaticAttributes<DepthToSpaceAttr, std::uint32_t, Mode>;
+    using Attributes_ = StaticAttributes<DepthToSpaceAttr, GENERATE_LIST_ATTR_TYPE(LIST_DEPTHTOSPACE_ATTR)>;
     template <DepthToSpaceAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -174,7 +189,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::DepthToSpaceAttr>::data;
 	}
 };
@@ -192,5 +207,6 @@ std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
 
 }  // namespace Aidge
 
+#undef LIST_DEPTHTOSPACE_ATTR
 
 #endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
diff --git a/include/aidge/operator/Flatten.hpp b/include/aidge/operator/Flatten.hpp
index b61fc6912dd0e9f61dd2506370c591aae8c3a107..0ccc54eb770421dd726658dfbd4e44ee78f28cfb 100644
--- a/include/aidge/operator/Flatten.hpp
+++ b/include/aidge/operator/Flatten.hpp
@@ -43,22 +43,33 @@ public:
      */
     void forward() override;
 };
+}  // namespace Aidge
+
+#define LIST_FLATTEN_ATTR(X)  \
+    X(Axis, "axis", std::int64_t)
 
+namespace Aidge {
 /**
  * @enum FlattenAttr
  * @brief Defines attributes for the Flatten operator.
+ *
+ * - Axis: dimension index at which to flatten the input tensor.
  */
 enum class FlattenAttr {
-    /**
-     * @brief The axis at which to flatten the input tensor.
-     */
-    Axis
+    GENERATE_LIST_ATTR_ENUM(LIST_FLATTEN_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-    template <>
-    const char *const EnumStrings<Aidge::FlattenAttr>::data[] = { "axis" };
+template <>
+struct EnumStrings<Aidge::FlattenAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::FlattenAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_FLATTEN_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief Description the Flatten operation to reshape a tensor into a 2D matrix.
@@ -85,7 +96,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<FlattenAttr, std::int64_t>;
+    using Attributes_ = StaticAttributes<FlattenAttr, GENERATE_LIST_ATTR_TYPE(LIST_FLATTEN_ATTR)>;
     template <FlattenAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -165,7 +176,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::FlattenAttr>::data;
 	}
 };
@@ -184,5 +195,6 @@ std::shared_ptr<Node> Flatten(std::int64_t axis = 1,
                             const std::string &name = "");
 }  // namespace Aidge
 
+#undef LIST_FLATTEN_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_FLATTEN_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 2f9974e8ed3b1723734a2483616feceace5bec33..9b71057fb20327c7c37d3ac9aa49d021e7c244cc 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -29,51 +29,37 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
+#define LIST_FOLD_ATTR(X)  \
+    X(OutputDims, "output_dims", sizeArr_t<DIM>),  \
+    X(StrideDims, "stride_dims", sizeArr_t<DIM>),  \
+    X(DilationDims, "dilation_dims", sizeArr_t<DIM>),  \
+    X(KernelDims, "kernel_dims", sizeArr_t<DIM>)
 
+namespace Aidge {
 /**
  * @enum FoldAttr
  * @brief Enumeration for the attributes of the Fold operation.
+ *
+ * - OutputDims: Specifies the shape of the output tensor after applying the fold operation.
+ * - StrideDims: Step sizes in each dimension during the fold operation.
+ * - DilationDims: Spacing between elements in the kernel during the fold.
+ * - KernelDims: Size of the kernel or filter applied during the fold.
  */
 enum class FoldAttr {
-    /**
-     * @brief Output dimensions of the fold operation.
-     *
-     * Specifies the shape of the output tensor after applying the fold operation.
-     */
-    OutputDims,
-
-    /**
-     * @brief Stride dimensions used during the fold operation.
-     *
-     * Strides are the step sizes in each dimension during the fold operation.
-     */
-    StrideDims,
-
-    /**
-     * @brief Dilation dimensions for the fold operation.
-     *
-     * Dilation is the spacing between elements in the kernel during the fold.
-     */
-    DilationDims,
-
-    /**
-     * @brief Kernel dimensions used for the fold operation.
-     *
-     * Specifies the size of the kernel or filter applied during the fold.
-     */
-    KernelDims
+    GENERATE_LIST_ATTR_ENUM(LIST_FOLD_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-    template <>
-    const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
-        "output_dims",
-        "stride_dims",
-        "dilation_dims",
-        "kernel_dims"
-    };
+template <>
+struct EnumStrings<Aidge::FoldAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::FoldAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_FOLD_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @class Fold_Op
@@ -112,11 +98,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<FoldAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>>;
+    using Attributes_ = StaticAttributes<FoldAttr, GENERATE_LIST_ATTR_TYPE(LIST_FOLD_ATTR)>;
 
     template <FoldAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -225,7 +207,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::FoldAttr>::data;
 	}
 };
@@ -265,4 +247,6 @@ extern template class Aidge::Fold_Op<2>;
 
 }  // namespace Aidge
 
+#undef LIST_FOLD_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_FOLD_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 86fc7bc7855473c6f73e3bcc36d46ef9b4956446..964e1b45de9e0690ea2109bd9bb0d42c82a073e9 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -43,29 +43,15 @@ public:
      */
     void forward() override;
 };
+} // namespace Aidge
 
-enum class GatherAttr {
-    /**
-     * @brief Axis along which to gather elements.
-     */
-    Axis,
 
-    /**
-     * @brief Indices specifying which elements to gather.
-     */
-    Indices,
+#define LIST_GATHER_ATTR(X)  \
+    X(Axis, "axis", std::int8_t),  \
+    X(Indices, "indices", std::vector<int64_t>),  \
+    X(GatheredShape, "gathered_shape", std::vector<DimSize_t>)
 
-    /**
-     * @brief Shape of the resulting gathered tensor.
-     */
-    GatheredShape
-};
 
-} // namespace Aidge
-namespace {
-    template <>
-    const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
-}
 namespace Aidge {
 /**
  * @brief Description for the Gather operation on an input tensor.
@@ -86,13 +72,21 @@ class Gather_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<GatherAttr,
-                                          std::int8_t,
-                                          std::vector<int64_t>,
-                                          std::vector<DimSize_t>>;
+    /**
+     * @enum Attr
+     * @brief Attributes for the Gather operation.
+     *
+     * - Axis: The axis along which to gather elements.
+     * - Indices: Specifies which elements to gather.
+     * - GatheredShape: The shape of the resulting gathered tensor.
+     */
+    enum class Attr {
+        GENERATE_LIST_ATTR_ENUM(LIST_GATHER_ATTR)
+    };
 
 private:
-    template <GatherAttr e>
+    using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_GATHER_ATTR)>;
+    template <Attr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -161,19 +155,19 @@ public:
      * @brief Get the axis along which elements are gathered.
      * @return The axis attribute.
      */
-    inline std::int8_t& axis() const { return mAttributes->getAttr<GatherAttr::Axis>(); }
+    inline std::int8_t& axis() const { return mAttributes->getAttr<Attr::Axis>(); }
 
     /**
      * @brief Get the indices specifying which elements to gather.
      * @return The indices attribute.
      */
-    inline std::vector<int64_t>& indices() const { return mAttributes->getAttr<GatherAttr::Indices>(); }
+    inline std::vector<int64_t>& indices() const { return mAttributes->getAttr<Attr::Indices>(); }
 
     /**
      * @brief Get the shape of the gathered tensor.
      * @return The gathered shape attribute.
      */
-    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes->getAttr<GatherAttr::GatheredShape>(); }
+    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes->getAttr<Attr::GatheredShape>(); }
 
     /**
      * @brief Get the input tensor names.
@@ -195,9 +189,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::GatherAttr>::data;
-	}
+	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -219,5 +211,20 @@ std::shared_ptr<Node> Gather(std::int8_t axis = 0,
 
 } // namespace Aidge
 
+namespace {
+template <>
+struct EnumStrings<Aidge::Gather_Op::Attr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::Gather_Op::Attr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_GATHER_ATTR)
+};
+}
+
+constexpr const char* const* Aidge::Gather_Op::attributesName() {
+    return EnumStrings<Aidge::Gather_Op::Attr>::data;
+}
+
+#undef LIST_GATHER_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index 06642231152cefe1023688811da0dcdc0bbde859..2388cd0c17339ea50a0ea2f9047c8cdabb08a68e 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -23,21 +23,35 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/logger/EnumString.hpp"
 
+#define LIST_GRIDSAMPLE_ATTR(X)                  \
+    X(Mode, "mode", Mode),                       \
+    X(PaddingMode, "padding_mode", PaddingMode), \
+    X(AlignCorners, "align_corners", bool)
+
 namespace Aidge {
+/**
+ * @enum GridSampleAttr
+ * @brief Attributes for the GridSample operation.
+ *
+ * - Mode: Specifies the interpolation mode (e.g., Linear, Nearest, Cubic).
+ * - PaddingMode: Specifies how to handle out-of-boundary grid values.
+ * - AlignCorners: Determines whether grid values are normalized to align with the image corners.
+ */
 enum class GridSampleAttr {
-	Mode,			// Specifies the interpolation mode (e.g., Linear, Nearest, Cubic).
-	PaddingMode,	// Specifies how to handle out-of-boundary grid values.
-	AlignCorners	// Determines whether grid values are normalized to align with the image corners.
+    GENERATE_LIST_ATTR_ENUM(LIST_GRIDSAMPLE_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-	template <>
-	const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
-		"mode",
-		"padding_mode",
-		"align_corners"
-	};
+template <>
+struct EnumStrings<Aidge::GridSampleAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_GRIDSAMPLE_ATTR)
+};
 }
+
 namespace Aidge {
 
 /**
@@ -88,7 +102,7 @@ public:
 	enum class PaddingMode { Zeros, Border, Reflection };
 
 private:
-	using Attributes_ = StaticAttributes<GridSampleAttr, Mode, PaddingMode, bool>;
+	using Attributes_ = StaticAttributes<GridSampleAttr, GENERATE_LIST_ATTR_TYPE(LIST_GRIDSAMPLE_ATTR)>;
 	template <GridSampleAttr e>
 	using attr = typename Attributes_::template attr<e>;
 	const std::shared_ptr<Attributes_> mAttributes;
@@ -185,7 +199,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::GridSampleAttr>::data;
 	}
 };
@@ -207,4 +221,6 @@ std::shared_ptr<Node> GridSample(
 
 } // namespace Aidge
 
+#undef LIST_GRIDSAMPLE_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_ */
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
index 806ed47f3db5f78b5636f7f14876f852ea22b341..49f9059033b2816b594802b1fcfaa4340418f883 100644
--- a/include/aidge/operator/Heaviside.hpp
+++ b/include/aidge/operator/Heaviside.hpp
@@ -24,23 +24,10 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-enum class HeavisideAttr {
-    /**
-     * @brief The value used in the output tensor when the input is 0.
-     */
-    Value
-};
-} // namespace Aidge
-namespace {
-    /**
-     * @brief Define string representations for Heaviside attributes.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
-}
-namespace Aidge {
+#define LIST_HEAVISIDE_ATTR(X)  \
+    X(Value, "value", float)
 
+namespace Aidge {
 /**
  * @class Heaviside_Op
  * @brief Implements the Heaviside step function operation.
@@ -59,18 +46,26 @@ namespace Aidge {
 class Heaviside_Op
     : public OperatorTensor,
       public Registrable<Heaviside_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Heaviside_Op &)>> {
+public:
+    static const std::string Type;
 
-private:
-    using Attributes_ = StaticAttributes<HeavisideAttr, float>;
+    /**
+     * @enum Attr
+     * @brief Attributes for the Heaviside operation.
+     *
+     * - Value: The value used in the output tensor when the input is 0.
+     */
+    enum class Attr {
+        GENERATE_LIST_ATTR_ENUM(LIST_HEAVISIDE_ATTR)
+    };
 
-    template <HeavisideAttr e>
+private:
+    using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_HEAVISIDE_ATTR)>;
+    template <Attr e>
     using attr = typename Attributes_::template attr<e>;
-
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
-    static const std::string Type;
-
     /**
      * @brief Constructor for the Heaviside operator.
      * @param[in] value The value to use in the output tensor when the input is 0.
@@ -123,9 +118,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::HeavisideAttr>::data;
-	}
+	static constexpr const char* const* attributesName();
 
     /**
      * @brief Get the attributes of the operator.
@@ -139,7 +132,7 @@ public:
      * @return A reference to the value attribute.
      */
     inline float &value() const {
-        return mAttributes->template getAttr<HeavisideAttr::Value>();
+        return mAttributes->template getAttr<Attr::Value>();
     }
 };
 
@@ -158,5 +151,20 @@ std::shared_ptr<Node> Heaviside(float value, const std::string &name = "");
 
 } // namespace Aidge
 
+namespace {
+template <>
+struct EnumStrings<Aidge::Heaviside_Op::Attr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::Heaviside_Op::Attr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_HEAVISIDE_ATTR)
+};
+}
+
+constexpr const char* const* Aidge::Heaviside_Op::attributesName() {
+    return EnumStrings<Aidge::Heaviside_Op::Attr>::data;
+}
+
+#undef LIST_HEAVISIDE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
index 6c82b6b4670cff44e9d21aeabe8f64aa2b2e2397..b1cbc143dd592271ebb982a81eb9350b0ea04a70 100644
--- a/include/aidge/operator/LRN.hpp
+++ b/include/aidge/operator/LRN.hpp
@@ -23,21 +23,12 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-enum class LRNAttr {
-    Alpha,  ///< Scale factor for normalization.
-    Beta,   ///< Exponent applied to the normalization term.
-    Bias,   ///< Constant bias added to the normalization term.
-    Size    ///< Number of channels to normalize over.
-};
-} // namespace Aidge
-namespace {
-    /**
-     * @brief EnumStrings specialization for LRNAttr.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size", nullptr};
-}
+#define LIST_LRN_ATTR(X)          \
+    X(Alpha, "alpha", float),     \
+    X(Beta, "beta", float),       \
+    X(Bias, "bias", float),       \
+    X(Size, "size", std::int32_t)
+
 namespace Aidge {
 /**
  * @brief Description of a Local Response Normalization (LRN) operation on an input Tensor.
@@ -77,9 +68,22 @@ public:
      */
     static const std::string Type;
 
+    /**
+     * @enum Attr
+     * @brief Attributes for the Local Response Normalization (LRN) operation.
+     *
+     * - Alpha: Scale factor for normalization.
+     * - Beta: Exponent applied to the normalization term.
+     * - Bias: Constant bias added to the normalization term.
+     * - Size: Number of channels to normalize over.
+     */
+    enum class Attr {
+        GENERATE_LIST_ATTR_ENUM(LIST_LRN_ATTR)
+    };
+
 private:
-    using Attributes_ = StaticAttributes<LRNAttr, float, float, float, std::int32_t>;
-    template <LRNAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_LRN_ATTR)>;
+    template <Attr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -131,25 +135,25 @@ public:
      * @brief Get or modify the `alpha` attribute.
      * @return Reference to the `alpha` attribute.
      */
-    inline float& alpha() const noexcept { return mAttributes->getAttr<LRNAttr::Alpha>(); }
+    inline float& alpha() const noexcept { return mAttributes->getAttr<Attr::Alpha>(); }
 
     /**
      * @brief Get or modify the `beta` attribute.
      * @return Reference to the `beta` attribute.
      */
-    inline float& beta() const noexcept { return mAttributes->getAttr<LRNAttr::Beta>(); }
+    inline float& beta() const noexcept { return mAttributes->getAttr<Attr::Beta>(); }
 
     /**
      * @brief Get or modify the `bias` attribute.
      * @return Reference to the `bias` attribute.
      */
-    inline float& bias() const noexcept { return mAttributes->getAttr<LRNAttr::Bias>(); }
+    inline float& bias() const noexcept { return mAttributes->getAttr<Attr::Bias>(); }
 
     /**
      * @brief Get or modify the `size` attribute.
      * @return Reference to the `size` attribute.
      */
-    inline std::int32_t& size() const noexcept { return mAttributes->getAttr<LRNAttr::Size>(); }
+    inline std::int32_t& size() const noexcept { return mAttributes->getAttr<Attr::Size>(); }
 
     /**
      * @brief Get the input tensor names for the LRN operator.
@@ -171,9 +175,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::LRNAttr>::data;
-	}
+	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -187,4 +189,20 @@ std::shared_ptr<Node> LRN(std::int32_t size, const std::string& name = "");
 
 } // namespace Aidge
 
+namespace {
+template <>
+struct EnumStrings<Aidge::LRN_Op::Attr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::LRN_Op::Attr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_LRN_ATTR)
+};
+}
+
+constexpr const char* const* Aidge::LRN_Op::attributesName() {
+    return EnumStrings<Aidge::LRN_Op::Attr>::data;
+}
+
+#undef LIST_LRN_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index acf9bae7f4955fee09699f27b7a23c06ce3d670e..867f324d3044cdc8ebd440dfebd5547f6936f47f 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -23,19 +23,9 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-enum class LeakyReLUAttr {
-    /**
-     * @brief Slope for the negative input values.
-     */
-    NegativeSlope
-};
-} // namespace Aidge
-namespace {
-    template <>
-    const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
-        = {"negative_slope"};
-    }
+#define LIST_LEAKYRELU_ATTR(X)  \
+    X(NegativeSlope, "negative_slope", float)
+
 namespace Aidge{
 /**
  * @class LeakyReLU_Op
@@ -57,9 +47,19 @@ class LeakyReLU_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
+    /**
+     * @enum Attr
+     * @brief Attributes for the LeakyReLU operation.
+     *
+     * - NegativeSlope: Slope for the negative input values.
+     */
+    enum class Attr {
+        GENERATE_LIST_ATTR_ENUM(LIST_LEAKYRELU_ATTR)
+    };
+
 private:
-    using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
-    template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_LEAKYRELU_ATTR)>;
+    template <Attr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -77,7 +77,7 @@ public:
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(
             std::make_shared<Attributes_>(
-                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
+                attr<Attr::NegativeSlope>(negativeSlope)))
     {}
 
     /**
@@ -104,7 +104,7 @@ public:
     /**
      * @brief Get the negative slope value.
      */
-    inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
+    inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<Attr::NegativeSlope>(); }
 
     /**
      * @brief Get the names of the input tensors.
@@ -126,9 +126,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::LeakyReLUAttr>::data;
-	}
+	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -139,6 +137,22 @@ public:
  * @return std::shared_ptr<Node> Node containing the Operator.
  */
 std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
+} // namespace Aidge
+
+namespace {
+template <>
+struct EnumStrings<Aidge::LeakyReLU_Op::Attr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::LeakyReLU_Op::Attr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_LEAKYRELU_ATTR)
+};
+}
+
+constexpr const char* const* Aidge::LeakyReLU_Op::attributesName() {
+    return EnumStrings<Attr>::data;
 }
 
+#undef LIST_LEAKYRELU_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index d90aab4a0b7581a5d1e2c7eaf6fb295e51953af4..01104262147dc461259c3de17e3b3ec3383328b4 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -28,48 +28,47 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+// Define the X-macro list with three parameters: name, string, and type.
+
+#define LIST_MAXPOOLING_ATTR(X)                     \
+    X(KernelDims, "kernel_dims", sizeArr_t<DIM>),   \
+    X(StrideDims, "stride_dims", sizeArr_t<DIM>),   \
+    X(Dilations,  "dilations",   sizeArr_t<DIM>),   \
+    X(CeilMode,   "ceil_mode",   bool)
+
 namespace Aidge {
 
 /**
- * @enum MaxPoolingAttr
- * @brief Attributes defining the configuration of a MaxPooling operation.
+ * @enum MaxPoolingAttr
+ * @brief Attributes defining the configuration of a MaxPooling Operator.
+ *
+ * - **KernelDims**: Kernel dimensions specifying the size of the pooling window for each spatial dimension.
+ *   Must be an array of positive integers. Common examples include [2,2] or [3,3].
+ * - **StrideDims**: Stride dimensions for sliding the pooling window across the input.
+ *   The stride specifies how much the window moves after each operation.
+ *   Must be an array of positive integers. For example, [1,1] or [2,2].
+ * - **Dilations**: Dilation along each spatial axis. Default value is 1 for all axes.
+ *   Must be an array of positive integers. For example, [1,1].
+ * - **CeilMode**: Flag indicating whether to use ceil or floor when calculating output size.
+ *   - `true`: Use `ceil` for output size calculation.
+ *   - `false`: Use `floor` for output size calculation.
  */
 enum class MaxPoolingAttr {
-  /**
-   * @brief Stride dimensions for sliding the pooling window across the input dimensions.
-   * The stride specifies how much the window moves after each operation.
-   * Must be positive integers.
-   */
-  StrideDims,
-  /**
-   * @brief Dilation along each spatial axis. Default value is 1.
-   */
-  Dilations,
-  /**
-   * @brief Kernel dimensions specifying the size of the pooling window for each spatial dimension.
-   * For example, common kernel dimensions include 2x2 or 3x3.
-   * Must be positive integers.
-   */
-  KernelDims,
-
-  /**
-   * @brief Flag indicating whether to use ceil or floor when calculating output size.
-   * - `true`: Use `ceil` for output size calculation.
-   * - `false`: Use `floor` for output size calculation.
-   */
-  CeilMode,
+    GENERATE_LIST_ATTR_ENUM(LIST_MAXPOOLING_ATTR)
 };
 } // namespace Aidge
-namespace {
-    /**
-     * @brief String representations of MaxPooling attributes for debugging and logging.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "dilations", "ceil_mode"};
-    }
 
-namespace Aidge{
+namespace {
+template <>
+struct EnumStrings<Aidge::MaxPoolingAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_MAXPOOLING_ATTR)
+};
+}
 
+namespace Aidge {
 /**
  * @class MaxPooling_Op
  * @tparam DIM Dimensionality of the input tensor (e.g., 1D, 2D, 3D).
@@ -107,13 +106,10 @@ class MaxPooling_Op : public OperatorTensor,
 public:
     static const std::string Type; ///< Static identifier for this operator type.
 
-    using Attributes_ = StaticAttributes<MaxPoolingAttr,
-                                         std::array<DimSize_t, DIM>,
-                                         std::array<DimSize_t, DIM>,
-                                         std::array<DimSize_t, DIM>,
-                                         bool>;
-
 private:
+    using Attributes_ = StaticAttributes<MaxPoolingAttr,
+                                GENERATE_LIST_ATTR_TYPE(LIST_MAXPOOLING_ATTR)
+            >;
     template <MaxPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes; ///< Shared pointer to operator attributes.
@@ -213,7 +209,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::MaxPoolingAttr>::data;
 	}
 };
@@ -265,5 +261,6 @@ inline std::shared_ptr<Node> MaxPooling(
 
 }  // namespace Aidge
 
+#undef LIST_MAXPOOLING_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 59df17ec146bb33dc1e6e8c007eb275054fd727b..e1eea4a284f494553708fa56f99477162eab93ab 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -114,24 +114,13 @@ public:
      */
     void forward() override;
 };
-
-enum class MemorizeAttr {
-    ScheduleStep,   // Defines the step interval for scheduling memory updates.
-    ForwardStep,    // Tracks the current step in the forward pass.
-    EndStep         // The final step for which memory updates will occur.
-};
 } // namespace Aidge
-namespace {
-    /**
-     * @brief String representations of the Memorize operator's attributes.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
-        "schedule_step",
-        "forward_step",
-        "end_step"
-    };
-}
+
+#define LIST_MEMORIZE_ATTR(X)                        \
+    X(ScheduleStep, "schedule_step", std::uint32_t), \
+    X(ForwardStep, "forward_step", std::uint32_t),   \
+    X(EndStep, "end_step", std::uint32_t)
+
 namespace Aidge {
 /**
  * @class Memorize_Op
@@ -155,9 +144,21 @@ class Memorize_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
+    /**
+     * @enum Attr
+     * @brief Attributes for the Memorize operation.
+     *
+     * - ScheduleStep: Defines the step interval for scheduling memory updates.
+     * - ForwardStep: Tracks the current step in the forward pass.
+     * - EndStep: The final step for which memory updates will occur.
+     */
+    enum class Attr {
+        GENERATE_LIST_ATTR_ENUM(LIST_MEMORIZE_ATTR)
+    };
+
 private:
-    using Attributes_ = StaticAttributes<MemorizeAttr, std::uint32_t, std::uint32_t, std::uint32_t>;
-    template <MemorizeAttr e>
+    using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_MEMORIZE_ATTR)>;
+    template <Attr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -223,19 +224,19 @@ public:
      * @brief Get or set the scheduling step for the operator.
      * @return A reference to the scheduling step.
      */
-    inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); }
+    inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<Attr::ScheduleStep>(); }
 
     /**
      * @brief Get or set the forward step counter for the operator.
      * @return A reference to the forward step counter.
      */
-    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<Attr::ForwardStep>(); }
 
     /**
      * @brief Get or set the end step defining the memory duration.
      * @return A reference to the end step value.
      */
-    inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); }
+    inline std::uint32_t& endStep() const { return mAttributes->template getAttr<Attr::EndStep>(); }
 
     /**
      * @brief Retrieve the names of the operator's input tensors.
@@ -257,9 +258,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::MemorizeAttr>::data;
-	}
+	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -271,5 +270,20 @@ public:
 std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
 }  // namespace Aidge
 
+namespace {
+template <>
+struct EnumStrings<Aidge::Memorize_Op::Attr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::Memorize_Op::Attr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_MEMORIZE_ATTR)
+};
+}
+
+constexpr const char* const* Aidge::Memorize_Op::attributesName() {
+    return EnumStrings<Aidge::Memorize_Op::Attr>::data;
+}
+
+#undef LIST_MEMORIZE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index dd59af175231acb274126d7f396cdd502046b004..81a54620a6f325eba04e9055e12d73d6a5d64163 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -31,6 +31,16 @@
 #ifdef PYBIND
 namespace py = pybind11;
 #endif
+
+
+#define SELECT_ENUM_FOR_ATTR(name, str, type) name
+#define SELECT_STR_FOR_ATTR(name, str, type) str
+#define SELECT_TYPE_FOR_ATTR(name, str, type) type
+
+#define GENERATE_LIST_ATTR_ENUM(LIST_ATTRS) LIST_ATTRS(SELECT_ENUM_FOR_ATTR)
+#define GENERATE_LIST_ATTR_STR(LIST_ATTRS) LIST_ATTRS(SELECT_STR_FOR_ATTR)
+#define GENERATE_LIST_ATTR_TYPE(LIST_ATTRS) LIST_ATTRS(SELECT_TYPE_FOR_ATTR)
+
 namespace Aidge {
 
 /**
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 0880b2c97ed7e2e6e9e4515c82c37aa4e0e91233..491a8a3697de5c685d6c9130423dd290e4b6cf71 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -24,17 +24,25 @@
 #include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
+#define LIST_PAD_ATTR(X)                                                \
+    X(BeginEndBorders, "begin_end_borders", Aidge::sizeArr_t<2 * DIM>), \
+    X(BorderType, "border_type", PadBorderType),                        \
+    X(BorderValue, "border_value", double)
+
 
+namespace Aidge {
 /**
  * @enum PadAttr
  * @brief Attributes for the Pad operator.
+ *
+ * - BeginEndBorders: Specifies the padding sizes for the beginning and end of each dimension.
+ * - BorderType: Type of border handling during padding.
+ * - BorderValue: Value to be used for constant padding.
  */
 enum class PadAttr {
-    BeginEndBorders, ///< Specifies the padding sizes for the beginning and end of each dimension.
-    BorderType,      ///< Type of border handling during padding.
-    BorderValue      ///< Value to be used for constant padding.
+    GENERATE_LIST_ATTR_ENUM(LIST_PAD_ATTR)
 };
+
 /**
  * @enum PadBorderType
  * @brief Types of border handling available for padding.
@@ -46,19 +54,16 @@ enum class PadBorderType {
     Wrap,     ///< Values wrap around the tensor dimensions.
     Zero      ///< All out-of-bound values are set to 0.
 };
-
 } // namespace Aidge
 
 namespace {
-    /**
-     * @brief EnumStrings specialization for PadAttr.
-     */
-    template <>
-    const char* const EnumStrings<Aidge::PadAttr>::data[] = {
-        "begin_end_borders",
-        "border_type",
-        "border_value"
-    };
+template <>
+struct EnumStrings<Aidge::PadAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::PadAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_PAD_ATTR)
+};
 
 /**
  * @brief EnumStrings specialization for PadBorderType.
@@ -131,11 +136,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<PadAttr,
-                                         std::array<DimSize_t, 2 * DIM>, // Padding for start and end of each dimension.
-                                         PadBorderType,                  // Border handling type.
-                                         double                          // Border value for constant padding.
-                                         >;
+    using Attributes_ = StaticAttributes<PadAttr, GENERATE_LIST_ATTR_TYPE(LIST_PAD_ATTR)>;
     template <PadAttr e>
     using attr = typename Attributes_::template attr<e>;
 
@@ -247,7 +248,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::PadAttr>::data;
 	}
 };
@@ -284,6 +285,6 @@ inline std::shared_ptr<Node> Pad(
 extern template class Aidge::Pad_Op<1>;
 extern template class Aidge::Pad_Op<2>;
 
-
+#undef LIST_PAD_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index d9d52f9bcd07a671d68e3db53c378c9ee6659c8e..9790f05e9375435f7adf2dfbf3fe0460487416fc 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -92,25 +92,35 @@ public:
      */
     void backward() override;
 };
+} //namespace Aidge
 
+#define LIST_POP_ATTR(X)  \
+    X(ForwardStep, "forward_step", std::uint32_t),  \
+    X(BackwardStep, "backward_step", std::uint32_t)
+
+namespace Aidge {
 /**
  * @enum PopAttr
  * @brief Attributes specific to the `Pop` operator.
+ *
+ * - ForwardStep: Tracks the current step in the forward pass.
+ * - BackwardStep: Tracks the current step in the backward pass.
  */
 enum class PopAttr {
-    ForwardStep,    // Tracks the current step in the forward pass
-    BackwardStep    // Tracks the current step in the backward pass
+    GENERATE_LIST_ATTR_ENUM(LIST_POP_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-    /**
-     * @brief String representations of the `Pop` operator's attributes.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::PopAttr>::data[] = {
-        "forward_step", "backward_step"
-    };
+template <>
+struct EnumStrings<Aidge::PopAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::PopAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_POP_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @class Pop_Op
@@ -131,7 +141,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<PopAttr, std::uint32_t, std::uint32_t>;
+    using Attributes_ = StaticAttributes<PopAttr, GENERATE_LIST_ATTR_TYPE(LIST_POP_ATTR)>;
     template <PopAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -226,7 +236,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::PopAttr>::data;
 	}
 };
@@ -239,5 +249,6 @@ public:
 std::shared_ptr<Node> Pop(const std::string& name = "");
 }  // namespace Aidge
 
+#undef LIST_POP_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_POP_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 3690579d34373b64eec20042b7f9615266c15aee..ae88c0c714ec5a1ce3a5b39e290d2566e49d9f4b 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -28,21 +28,29 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
-namespace Aidge {
 
+#define LIST_PRODUCER_ATTR(X) X(Constant, "constant", bool)
+
+namespace Aidge {
 /**
- * @enum ProdAttr
+ * @enum ProducerAttr
  * @brief Attributes specific to the `Producer_Op` class.
  */
-enum class ProdAttr { Constant };
+enum class ProducerAttr {
+    GENERATE_LIST_ATTR_ENUM(LIST_PRODUCER_ATTR)
+};
 } // namespace Aidge
+
 namespace {
-    /**
-     * @brief Enum string representation for `ProdAttr`.
-     */
-    template <>
-    const char* const EnumStrings<Aidge::ProdAttr>::data[] = {"constant"};
+template <>
+struct EnumStrings<Aidge::ProducerAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ProducerAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_PRODUCER_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @class Producer_Op
@@ -76,8 +84,10 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ProdAttr, bool>;
-    template <ProdAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<ProducerAttr,
+            GENERATE_LIST_ATTR_TYPE(LIST_PRODUCER_ATTR)
+        >;
+    template <ProducerAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -160,7 +170,7 @@ public:
      *
      * @return A reference to the constant attribute.
      */
-    inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
+    inline bool& constant() const { return mAttributes->template getAttr<ProducerAttr::Constant>(); }
 
     /**
      * @brief Performs the forward operation for the operator.
@@ -280,4 +290,6 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOInde
 
 } // namespace Aidge
 
+#undef LIST_PRODUCER_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 3ee4a1bec40f7f6aa409308708bc3338174c652b..cdb139f96f4bb33b9a22479a2f996d71abf85f0e 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -25,41 +25,11 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-enum class ReduceMeanAttr {
-  /**
-   * @brief Axes over which the mean operation is performed.
-   *
-   * Axes are specified as a vector of integers, each representing a dimension
-   * of the input tensor to be reduced.
-   */
-  Axes,
-
-  /**
-   * @brief Flag indicating whether to keep reduced dimensions.
-   *
-   * - `true`: Retain reduced dimensions with size 1.
-   * - `false`: Completely remove reduced dimensions.
-   */
-  KeepDims,
-
-  /**
-   * @brief Flag indicating behavior when axes are empty.
-   *
-   * - `true`: No operation is performed if axes are empty.
-   * - `false`: Reduction is performed over all axes if none are specified.
-   */
-  NoopWithEmptyAxes
-};
-} // namespace Aidge
-namespace {
-    template <>
-    const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {
-        "axes",
-        "keep_dims",
-        "noop_with_empty_axes"
-    };
-}
+#define LIST_REDUCEMEAN_ATTR(X)  \
+    X(Axes, "axes", std::vector<std::int32_t>),  \
+    X(KeepDims, "keep_dims", bool),  \
+    X(NoopWithEmptyAxes, "noop_with_empty_axes", bool)
+
 namespace Aidge {
 /**
  * @class ReduceMean_Op
@@ -94,13 +64,26 @@ class ReduceMean_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
+    /**
+     * @enum Attr
+     * @brief Defines attributes for the ReduceMean operation.
+     *
+     * - **Axes**: Specifies the dimensions along which the mean is computed.
+     * - **KeepDims**: Determines whether the reduced dimensions are preserved.
+     *   - `true`: Retains reduced dimensions with a size of 1.
+     *   - `false`: Removes reduced dimensions from the output.
+     * - **NoopWithEmptyAxes**: Defines behavior when no axes are provided.
+     *   - `true`: The operation is skipped if no axes are specified.
+     *   - `false`: The reduction is applied across all dimensions.
+     */
+    enum class Attr {
+        GENERATE_LIST_ATTR_ENUM(LIST_REDUCEMEAN_ATTR)
+    };
+
 private:
-    using Attributes_ = StaticAttributes<ReduceMeanAttr,
-                                            std::vector<std::int32_t>,
-                                            bool,
-                                            bool>;
+    using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_REDUCEMEAN_ATTR)>;
 
-    template <ReduceMeanAttr e>
+    template <Attr e>
     using attr = typename Attributes_::template attr<e>;
 
     const std::shared_ptr<Attributes_> mAttributes;
@@ -154,17 +137,17 @@ public:
     /**
      * @brief Get the axes over which the mean is computed.
      */
-    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<Attr::Axes>(); }
 
     /**
      * @brief Get whether reduced dimensions are retained.
      */
-    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<Attr::KeepDims>(); }
 
     /**
      * @brief Get the behavior when axes are empty.
      */
-    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<Attr::NoopWithEmptyAxes>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
@@ -178,9 +161,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ReduceMeanAttr>::data;
-	}
+	static constexpr const char* const* attributesName();
 
     virtual ~ReduceMean_Op() noexcept;
 };
@@ -203,5 +184,20 @@ std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
 
 }  // namespace Aidge
 
+namespace {
+template <>
+struct EnumStrings<Aidge::ReduceMean_Op::Attr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ReduceMean_Op::Attr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_REDUCEMEAN_ATTR)
+};
+}
+
+constexpr const char* const* Aidge::ReduceMean_Op::attributesName(){
+    return EnumStrings<Aidge::ReduceMean_Op::Attr>::data;
+}
+
+#undef LIST_REDUCEMEAN_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index adb58f895cf3fbfa67b84c518a7f6cedf09d1a19..73f59c25d43e8c78cfd9feb42eefcfd94f8680a1 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -19,44 +19,16 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
-enum class ReduceSumAttr {
-/**
-   * @brief Axes over which the mean operation is performed.
-   *
-   * Axes are specified as a vector of integers, each representing a dimension
-   * of the input tensor to be reduced.
-   */
-  Axes,
-
-  /**
-   * @brief Flag indicating whether to keep reduced dimensions.
-   *
-   * - `true`: Retain reduced dimensions with size 1.
-   * - `false`: Completely remove reduced dimensions.
-   */
-  KeepDims,
-
-  /**
-   * @brief Flag indicating behavior when axes are empty.
-   *
-   * - `true`: No operation is performed if axes are empty.
-   * - `false`: Reduction is performed over all axes if none are specified.
-   */
-  NoopWithEmptyAxes
-};
+#define LIST_REDUCESUM_ATTR(X)  \
+    X(Axes, "axes", std::vector<std::int32_t>),  \
+    X(KeepDims, "keep_dims", bool),  \
+    X(NoopWithEmptyAxes, "noop_with_empty_axes", bool)
 
-} // namespace Aidge
-namespace {
-    template <>
-    const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
-}
 namespace Aidge {
 /**
  * @class ReduceSum_Op
@@ -91,12 +63,25 @@ class ReduceSum_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
+    /**
+     * @enum Attr
+     * @brief Defines attributes for the ReduceSum operation.
+     *
+     * - **Axes**: Specifies the dimensions along which the sum is computed.
+     * - **KeepDims**: Determines whether the reduced dimensions are preserved.
+     *   - `true`: Retains reduced dimensions with a size of 1.
+     *   - `false`: Removes reduced dimensions from the output.
+     * - **NoopWithEmptyAxes**: Defines behavior when no axes are provided.
+     *   - `true`: The operation is skipped if no axes are specified.
+     *   - `false`: The reduction is applied across all dimensions.
+     */
+    enum class Attr {
+        GENERATE_LIST_ATTR_ENUM(LIST_REDUCESUM_ATTR)
+    };
+
 private:
-    using Attributes_ = StaticAttributes<ReduceSumAttr,
-                                            std::vector<std::int32_t>,
-                                            bool,
-                                            bool>;
-    template <ReduceSumAttr e>
+    using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_REDUCESUM_ATTR)>;
+    template <Attr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -114,9 +99,9 @@ public:
     ReduceSum_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
-            attr<ReduceSumAttr::Axes>(axes),
-            attr<ReduceSumAttr::KeepDims>(keep_dims),
-            attr<ReduceSumAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
+            attr<Attr::Axes>(axes),
+            attr<Attr::KeepDims>(keep_dims),
+            attr<Attr::NoopWithEmptyAxes>(noop_with_empty_axes)))
     {}
 
     /**
@@ -157,17 +142,17 @@ public:
     /**
      * @brief Get the axes over which the mean is computed.
      */
-    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::Axes>(); }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<Attr::Axes>(); }
 
     /**
      * @brief Get whether reduced dimensions are retained.
      */
-    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::KeepDims>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<Attr::KeepDims>(); }
 
     /**
      * @brief Get the behavior when axes are empty.
      */
-    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::NoopWithEmptyAxes>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<Attr::NoopWithEmptyAxes>(); }
 
 
     static const std::vector<std::string> getInputsName() {
@@ -181,9 +166,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::ReduceSumAttr>::data;
-	}
+	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -208,4 +191,20 @@ inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
 }
 }  // namespace Aidge
 
+namespace {
+template <>
+struct EnumStrings<Aidge::ReduceSum_Op::Attr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ReduceSum_Op::Attr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_REDUCESUM_ATTR)
+};
+}
+
+constexpr const char* const* Aidge::ReduceSum_Op::attributesName() {
+    return EnumStrings<Aidge::ReduceSum_Op::Attr>::data;
+}
+
+#undef LIST_REDUCESUM_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index e69c42d4d98974e7bb00acbf17581cd56ada1331..f02dae45e8285e7187ca7f739c163e69bee7c81c 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -43,32 +43,37 @@ public:
     void forward() override;
     void backward() override;
 };
+} // namespace Aidge
+
+
+#define LIST_RESHAPE_ATTR(X)  \
+    X(Shape, "shape", std::vector<std::int64_t>),  \
+    X(AllowZero, "allow_zero", bool)
 
+
+namespace Aidge {
 /**
  * @enum ReshapeAttr
  * @brief Enumeration of attributes specific to the Reshape operator.
+ *
+ * - **Shape**: The target shape for the output tensor.
+ * - **AllowZero**: When true, zeros in the target shape retain the corresponding dimension size from the input tensor.
  */
 enum class ReshapeAttr {
-    /**
-     * @brief The target shape for the output tensor.
-     */
-    Shape,
-
-    /**
-     * @brief Whether zeros in the shape attribute are allowed.
-     *
-     * When true, zeros in the target shape retain the corresponding dimension size from the input tensor.
-     */
-    AllowZero
+    GENERATE_LIST_ATTR_ENUM(LIST_RESHAPE_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-    /**
-     * @brief EnumStrings specialization for ReshapeAttr.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = {"shape", "allow_zero"};
+template <>
+struct EnumStrings<Aidge::ReshapeAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ReshapeAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_RESHAPE_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief Description of Reshape operator that adjusts the shape of the input tensor.
@@ -94,7 +99,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>, bool>;
+    using Attributes_ = StaticAttributes<ReshapeAttr, GENERATE_LIST_ATTR_TYPE(LIST_RESHAPE_ATTR)>;
     template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -189,7 +194,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::ReshapeAttr>::data;
 	}
 };
@@ -208,5 +213,6 @@ std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
 
 }  // namespace Aidge
 
+#undef LIST_RESHAPE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 37d42fcc861db42c991a6e7f4296d725d002aad5..32ddbe48804e359a9a868a149e66c43342b76d56 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -25,30 +25,38 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-namespace Aidge {
 
-/* @brief attributes for the aidge operator */
+#define LIST_RESIZE_ATTR(X) \
+    X(CoordinateTransformationMode, "coordinate_transformation_mode", Interpolation::CoordinateTransformation), \
+    X(CubicCoeffA, "cubic_coeff_a", float), \
+    X(InterpolationMode, "interpolation_mode", Interpolation::Mode), \
+    X(PaddingMode, "padding_mode", PadBorderType)
+
+namespace Aidge {
+/**
+ * @enum ResizeAttr
+ * @brief Attributes for the Resize operation.
+ *
+ * - CoordinateTransformationMode: Defines how source coordinates map to target coordinates.
+ * - CubicCoeffA: Coefficient used in cubic interpolation.
+ * - InterpolationMode: Defines the interpolation method used.
+ * - PaddingMode: Specifies how padding is handled.
+ */
 enum class ResizeAttr {
-    //   antialias,
-    // axes,
-    CoordinateTransformationMode,
-    CubicCoeffA,
-    // excludeOutside,
-    //   extrapolation_value,
-    //   keep_aspect_ratio_policy,
-    InterpolationMode,
-    PaddingMode
+    GENERATE_LIST_ATTR_ENUM(LIST_RESIZE_ATTR)
 };
 } // namespace Aidge
+
 namespace {
-    template <>
-    const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
-        "coordinate_transformation_mode",
-        "cubic_coeff_a",
-        "interpolation_mode",
-        "padding_mode"
-    };
+template <>
+struct EnumStrings<Aidge::ResizeAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ResizeAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_RESIZE_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief Resize operator, will up/downscale a given tensor given the input.
@@ -98,18 +106,15 @@ class Resize_Op
           std::string,
           std::function<std::shared_ptr<OperatorImpl>(const Resize_Op &)>> {
 
-  private:
+private:
     using Attributes_ =
         StaticAttributes<ResizeAttr,
-                         Interpolation::CoordinateTransformation,
-                         float,
-                         Interpolation::Mode,
-                         PadBorderType>;
+                         GENERATE_LIST_ATTR_TYPE(LIST_RESIZE_ATTR)>;
     template <ResizeAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
-  public:
+public:
     static const std::string Type;
     /**
      * @brief creates a resize operator
@@ -206,7 +211,7 @@ class Resize_Op
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::ResizeAttr>::data;
 	}
 };
@@ -240,4 +245,6 @@ Resize(std::vector<float> scale = std::vector<float>(),
 
 } // namespace Aidge
 
+#undef LIST_RESIZE_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index fb342d34580092febaf3d1e63ea78247c3e8f77a..c5264fe551bf6ab0d18010b37bb66782170cee74 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -26,37 +26,35 @@
 // Caution: This operator is now deprecated and should no longer be used.
 // It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
 
+#define LIST_SCALING_ATTR(X) \
+    X(ScalingFactor, "scaling_factor", float), \
+    X(QuantizedNbBits, "quantized_nb_bits", std::size_t), \
+    X(IsOutputUnsigned, "is_output_unsigned", bool)
+
 namespace Aidge {
+/**
+ * @enum ScalingAttr
+ * @brief Attributes for the Scaling operation.
+ *
+ * - ScalingFactor: Floating-point scaling factor applied to the input tensor.
+ * - QuantizedNbBits: Specifies the bit-width used for quantization.
+ * - IsOutputUnsigned: Indicates whether the quantized output values are unsigned.
+ */
 enum class ScalingAttr {
-    /**
-     * @brief Scaling factor applied to the input tensor.
-     *
-     * This floating-point value is used to scale the input tensor.
-     */
-    ScalingFactor,
-
-    /**
-     * @brief Number of quantization bits.
-     *
-     * Specifies the bit-width used for quantization.
-     * For example, a value of `8` represents 8-bit quantization.
-     */
-    QuantizedNbBits,
-
-    /**
-     * @brief Indicates whether the output is unsigned.
-     *
-     * - `true`: The quantized output values are unsigned integers.
-     * - `false`: The quantized output values are signed integers.
-     */
-    IsOutputUnsigned
+    GENERATE_LIST_ATTR_ENUM(LIST_SCALING_ATTR)
 };
 } // namespace Aidge
+
 namespace {
-    template <>
-    const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-        = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
+template <>
+struct EnumStrings<Aidge::ScalingAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ScalingAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_SCALING_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief Description of a scaling operation to scale and quantize input tensors.
@@ -82,7 +80,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ScalingAttr, float, std::size_t, bool>;
+    using Attributes_ = StaticAttributes<ScalingAttr, GENERATE_LIST_ATTR_TYPE(LIST_SCALING_ATTR)>;
     template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -145,7 +143,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::ScalingAttr>::data;
 	}
 };
@@ -165,5 +163,6 @@ std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
                                      const std::string& name = "");
 } // namespace Aidge
 
+#undef LIST_SCALING_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 2a553fb827fc8a8d4b03fa06ebcd8825ae2ed64f..290d95eefd7972dad3d0ed05a01eb7105f5f9a62 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -47,29 +47,36 @@ public:
      */
     void forward() override;
 };
+}
+
+#define LIST_SHAPE_ATTR(X) \
+    X(Start, "start", std::int64_t), \
+    X(End, "end", std::int64_t)
 
+namespace Aidge {
 /**
  * @enum ShapeAttr
  * @brief Enumeration of attributes specific to the Shape operator.
+ *
+ * - Start: Start index of the slice of dimensions to return.
+ * - End: End index of the slice of dimensions to return (exclusive).
  */
 enum class ShapeAttr {
-    /**
-     * @brief Start index of the slice of dimensions to return.
-     */
-    Start,
-    /**
-     * End index of the slice of dimensions to return (exclusive).
-     */
-    End
+    GENERATE_LIST_ATTR_ENUM(LIST_SHAPE_ATTR)
 };
 } // namespace Aidge
+
 namespace {
-    /**
-     * @brief EnumStrings specialization for ShapeAttr.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
+/// @brief EnumStrings specialization for ShapeAttr.
+template <>
+struct EnumStrings<Aidge::ShapeAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::ShapeAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_SHAPE_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief Description of the operation of extracting the shape of a tensor.
@@ -92,7 +99,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>;
+    using Attributes_ = StaticAttributes<ShapeAttr, GENERATE_LIST_ATTR_TYPE(LIST_SHAPE_ATTR)>;
     template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -176,7 +183,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::ShapeAttr>::data;
 	}
 };
@@ -193,6 +200,6 @@ std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end
 
 } // namespace Aidge
 
-
+#undef LIST_SHAPE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index fa21b3d197551e54a95fe29dbb8e3f83d30865af..b425fe75208b37105ce6baadd4f2ff63f94f2f3c 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -44,51 +44,43 @@ public:
      */
     void forward() override;
 };
+}  // namespace Aidge
+
+#define LIST_SLICE_ATTR(X) \
+    X(Starts, "starts", std::vector<std::int64_t>), \
+    X(Ends, "ends", std::vector<std::int64_t>), \
+    X(Axes, "axes", std::vector<std::int8_t>), \
+    X(Steps, "steps", std::vector<std::int64_t>)
 
+namespace Aidge {
 /**
  * @enum SliceAttr
  * @brief Attributes for the Slice operation.
+ *
+ * - Starts: Starting indices for the slice along each axis.
+ *   - If index is < 0, the input tensor's rank is added.
+ *   - If index is still < 0, it is forced to 0.
+ *   - If index > dim, it is forced to dim.
+ * - Ends: Ending indices for the slice along each axis (exclusive).
+ *   - Follows the same adjustment rules as Starts.
+ * - Axes: Axes along which the slice operation is performed.
+ * - Steps: Steps to move between each slice along each axis.
  */
 enum class SliceAttr {
-    /**
-     * @brief Starting indices for the slice along each axis.
-     *
-     * Specifies the start position for slicing for each axis.
-     * @details if index is < 0 then the input tansor's rank is added.
-     * After, if index is < 0 then it is forced to 0,
-     * if index > dim then index is forced to dim.
-     */
-    Starts,
-
-    /**
-     * @brief Ending indices for the slice along each axis.
-     *
-     * Specifies the end position (exclusive) for slicing for each axis.
-     * @details if index is < 0 then the input tansor's rank is added.
-     * After, if index is < 0 then it is forced to 0,
-     * if index > dim then index is forced to dim.
-     */
-    Ends,
-
-    /**
-     * @brief Axes along which the slice operation is performed.
-     *
-     * Specifies which dimensions of the input tensor are affected by the slice.
-     */
-    Axes,
-
-    /**
-     * @brief Steps to move between each slice along each axis.
-     *
-     * Specifies the step size for slicing along each axis.
-     */
-    Steps
+    GENERATE_LIST_ATTR_ENUM(LIST_SLICE_ATTR)
 };
 } // namespace Aidge
+
 namespace {
-    template <>
-    const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
+template <>
+struct EnumStrings<Aidge::SliceAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::SliceAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_SLICE_ATTR)
+};
 }
+
 namespace Aidge{
 /**
  * @class Slice_Op
@@ -125,19 +117,10 @@ class Slice_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    /**
-     * @brief Defines static attributes for the Slice operator.
-     */
-    using Attributes_ = StaticAttributes<SliceAttr,
-                                            std::vector<std::int64_t>, // Starts
-                                            std::vector<std::int64_t>, // Ends
-                                            std::vector<std::int8_t>,  // Axes
-                                            std::vector<std::int64_t>>; // Steps
-
 private:
+    using Attributes_ = StaticAttributes<SliceAttr, GENERATE_LIST_ATTR_TYPE(LIST_SLICE_ATTR)>;
     template <SliceAttr e>
     using attr = typename Attributes_::template attr<e>;
-
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -213,7 +196,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::SliceAttr>::data;
 	}
 };
@@ -236,4 +219,6 @@ std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
 
 }  // namespace Aidge
 
+#undef LIST_SLICE_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 86e1a57e70c4b7070b9af279980b2d5344a2f6f0..b0c6a2edae7ab9bec4a5f45746f2bc9258b6eb29 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -23,24 +23,36 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+#define LIST_SOFTMAX_ATTR(X) \
+    X(Axis, "axis", std::int32_t)
+
 namespace Aidge {
+/**
+ * @enum SoftmaxAttr
+ * @brief Attributes for the Softmax operation.
+ *
+ * - Axis: Axis along which the softmax operation is applied.
+ *   - Determines the dimension in the input tensor over which the softmax
+ *     operation will compute normalized exponential values.
+ */
 enum class SoftmaxAttr {
-    /**
-     * @brief Axis along which the softmax operation is applied.
-     *
-     * Determines the dimension in the input tensor over which the softmax
-     * operation will compute normalized exponential values.
-     */
-    Axis
+    GENERATE_LIST_ATTR_ENUM(LIST_SOFTMAX_ATTR)
 };
 } // namespace Aidge
+
 namespace {
-    /**
-     * @brief EnumStrings specialization for SoftmaxAttr.
-     */
-    template <>
-    const char* const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
+/**
+ * @brief EnumStrings specialization for SoftmaxAttr.
+ */
+template <>
+struct EnumStrings<Aidge::SoftmaxAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::SoftmaxAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_SOFTMAX_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief Description of a Softmax operation on input Tensor along a specified axis.
@@ -68,7 +80,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<SoftmaxAttr, std::int32_t>;
+    using Attributes_ = StaticAttributes<SoftmaxAttr, GENERATE_LIST_ATTR_TYPE(LIST_SOFTMAX_ATTR)>;
     template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -143,7 +155,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::SoftmaxAttr>::data;
 	}
 };
@@ -159,4 +171,6 @@ std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
 
 } // namespace Aidge
 
+#undef LIST_SOFTMAX_ATTR
+
 #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 8b6acb06023f5f71cbb71b42281f21bda19caaed..038879f05dfc57f6451d0c490ec52e8283a1b93f 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -44,36 +44,41 @@ public:
      */
     void forward() override;
 };
+} // namespace Aidge
 
+#define LIST_SPLIT_ATTR(X) \
+    X(Axis, "axis", std::int8_t), \
+    X(Split, "split", std::vector<DimSize_t>)
+
+namespace Aidge {
 /**
  * @enum SplitAttr
  * @brief Enumeration of Split operator attributes.
+ *
+ * - Axis: Axis along which to split the input tensor.
+ *   - The specified axis determines the direction of splitting.
+ * - Split: Sizes of each output tensor after splitting.
+ *   - If specified, the sum of the split sizes must match the size of the input
+ *     tensor along the specified axis.
  */
 enum class SplitAttr {
-    /**
-     * @brief Axis along which to split the input tensor.
-     *
-     * The specified axis determines the direction of splitting.
-     */
-    Axis,
-
-    /**
-     * @brief Sizes of each output tensor after splitting.
-     *
-     * If specified, the sum of the split sizes must match the size of the input
-     * tensor along the specified axis.
-     */
-    Split
+    GENERATE_LIST_ATTR_ENUM(LIST_SPLIT_ATTR)
 };
 } // namespace Aidge
 
 namespace {
-    /**
-     * @brief EnumStrings specialization for SplitAttr.
-     */
-    template <>
-    const char* const EnumStrings<Aidge::SplitAttr>::data[] = {"axis", "split"};
-    }
+/**
+ * @brief EnumStrings specialization for SplitAttr.
+ */
+template <>
+struct EnumStrings<Aidge::SplitAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::SplitAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_SPLIT_ATTR)
+};
+}
+
 
 namespace Aidge {
 /**
@@ -109,7 +114,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>>;
+    using Attributes_ = StaticAttributes<SplitAttr, GENERATE_LIST_ATTR_TYPE(LIST_SPLIT_ATTR)>;
     template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -188,7 +193,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::SplitAttr>::data;
 	}
 };
@@ -209,5 +214,6 @@ std::shared_ptr<Node> Split(DimSize_t nbOutput,
 
 }  // namespace Aidge
 
+#undef LIST_SPLIT_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 69fa9d493a321199ea2fddd61c7b769a668c6f42..987f1e6af1452d9513cd855e63a8f8504721e25a 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -40,19 +40,34 @@ public:
       : OperatorImpl(op, backend) {}
   void forward() override;
 };
+} // namespace Aidge
+
+#define LIST_SQUEEZE_ATTR(X) \
+    X(Axes, "axes", std::vector<std::int8_t>)
 
+namespace Aidge {
+/**
+ * @enum SqueezeAttr
+ * @brief Enumeration of Squeeze operator attributes.
+ *
+ * - Axes: axes to squeeze, if left empty all 1 sized
+ * dimensions will be removed.
+ */
 enum class SqueezeAttr {
-  /**
-   * @brief axes to squeeze, if left empty all 1 sized
-   * dimensions will be removed.
-   */
-  Axes
+    GENERATE_LIST_ATTR_ENUM(LIST_SQUEEZE_ATTR)
 };
 } // namespace Aidge
+
 namespace {
-  template <>
-  const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"axes"};
+template <>
+struct EnumStrings<Aidge::SqueezeAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::SqueezeAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_SQUEEZE_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief This operator has as purpose to remove dummy dimensions around given
@@ -152,7 +167,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::SqueezeAttr>::data;
 	}
 };
@@ -165,4 +180,6 @@ inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
 }
 } // namespace Aidge
 
+#undef LIST_SQUEEZE_ATTR
+
 #endif // AIDGE_CORE_OPERATOR_SQUEEZE_H_
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
index 21442844789f065cf0f127db4380f70c4618ca86..84341375649e6d8d4948283971e86042cc003fd4 100644
--- a/include/aidge/operator/Stack.hpp
+++ b/include/aidge/operator/Stack.hpp
@@ -89,20 +89,41 @@ public:
      * @brief Executes the forward pass for the Stack operation.
      */
     void forward() override;
+
+    /**
+     * @brief Executes the backward pass for the Stack operation.
+     */
+    void backward() override;
 };
 
+#define LIST_STACK_ATTR(X)  \
+    X(ForwardStep, "forward_step", std::uint32_t), \
+    X(BackwardStep, "backward_step", std::uint32_t), \
+    X(MaxElements, "max_elements", std::uint32_t)
+
+/**
+ * @enum StackAttr
+ * @brief Attributes for the Stack operation.
+ *
+ * - ForwardStep: Tracks the current step in the forward pass.
+ * - BackwardStep: Tracks the current step in the backward pass.
+ * - MaxElements: Maximum number of elements that can be stacked.
+ */
 enum class StackAttr {
-    ForwardStep,   // Tracks the current step in the forward pass.
-    MaxElements    // Maximum number of elements that can be stacked.
+    GENERATE_LIST_ATTR_ENUM(LIST_STACK_ATTR)
 };
 }  // namespace Aidge
+
 namespace {
-    /**
-     * @brief String representations of the Stack operator's attributes.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
+template <>
+struct EnumStrings<Aidge::StackAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::StackAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_STACK_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @class StackOp
@@ -123,7 +144,9 @@ namespace Aidge {
 class StackOp : public OperatorTensor,
     public Registrable<StackOp, std::string, std::function<std::unique_ptr<OperatorImpl>(const StackOp&)>> {
 private:
-    using Attributes_ = StaticAttributes<StackAttr, std::uint32_t, std::uint32_t>;
+    using Attributes_ = StaticAttributes<StackAttr,
+            GENERATE_LIST_ATTR_TYPE(LIST_STACK_ATTR)
+        >;
     template <StackAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -181,6 +204,11 @@ public:
      */
     void forward() override;
 
+    /**
+     * @brief Executes the backward pass for the `Stack` operation.
+     */
+    void backward() override;
+
     /**
      * @brief Access the operator's attributes.
      * @return A shared pointer to the operator's attributes.
@@ -205,6 +233,15 @@ public:
         return mAttributes->template getAttr<StackAttr::ForwardStep>();
     }
 
+    /**
+     * @brief Get or set the backward step counter for the operator.
+     * @return A reference to the backward step counter.
+     */
+    inline std::uint32_t& backwardStep() const {
+        return mAttributes->template getAttr<StackAttr::BackwardStep>();
+    }
+
+
     /**
      * @brief Retrieve the names of the operator's input tensors.
      * @return A vector of strings representing input tensor names.
@@ -225,7 +262,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::StackAttr>::data;
 	}
 };
@@ -239,5 +276,6 @@ public:
 std::shared_ptr<Node> Stack(std::uint32_t maxElements = 0, const std::string& name = "");
 }  // namespace Aidge
 
+#undef LIST_STACK_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_STACK_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 2619c5ea5d41407100b66f909d6f64176027f74c..25d8d92f67901dbeb7cf0610a0f818cdbf60b0bd 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -46,28 +46,11 @@ public:
      */
     void forward() override;
 };
-
-/**
- * @enum TransposeAttr
- * @brief Enumeration of attributes specific to the Transpose operator.
- */
-enum class TransposeAttr {
-    /**
-     * @brief Order of the output dimensions relative to the input dimensions.
-     *
-     * If this attribute is empty, the dimensions of the input tensor will
-     * be reversed.
-     */
-    OutputDimsOrder
-};
 } // namespace Aidge
-namespace {
-    /**
-     * @brief EnumStrings specialization for TransposeAttr.
-     */
-    template <>
-    const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
-    }
+
+#define LIST_TRANSPOSE_ATTR(X) \
+    X(OutputDimsOrder, "output_dims_order", std::vector<DimSize_t>)
+
 namespace Aidge {
 /**
  * @brief Describes the operation of transposing the axes of a given tensor.
@@ -84,17 +67,30 @@ namespace Aidge {
  * @see Registrable
  */
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
-
+                public Registrable<Transpose_Op,
+                                   std::string,
+                                   std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
 public:
     /**
      * @brief Static type string for the Transpose operator.
      */
     static const std::string Type;
 
+    /**
+     * @enum Attr
+     * @brief Enumeration of attributes specific to the Transpose operator.
+     *
+     * - OutputDimsOrder: Order of the output dimensions relative to the input dimensions.
+     * If this attribute is empty, the dimensions of the input tensor will
+     * be reversed.
+     */
+    enum class Attr {
+        GENERATE_LIST_ATTR_ENUM(LIST_TRANSPOSE_ATTR)
+    };
+
 private:
-    using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
-    template <TransposeAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<Attr, GENERATE_LIST_ATTR_TYPE(LIST_TRANSPOSE_ATTR)>;
+    template <Attr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -156,7 +152,7 @@ public:
      * If left empty, axes will be reversed.
      */
     inline std::vector<DimSize_t>& outputDimsOrder() const noexcept {
-        return mAttributes->getAttr<TransposeAttr::OutputDimsOrder>();
+        return mAttributes->getAttr<Attr::OutputDimsOrder>();
     }
 
     /**
@@ -179,9 +175,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
-		return EnumStrings<Aidge::TransposeAttr>::data;
-	}
+	static constexpr const char* const* attributesName();
 };
 
 /**
@@ -196,5 +190,20 @@ std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder =
 
 }  // namespace Aidge
 
+namespace {
+template <>
+struct EnumStrings<Aidge::Transpose_Op::Attr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::Transpose_Op::Attr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_TRANSPOSE_ATTR)
+};
+}
+
+constexpr const char* const* Aidge::Transpose_Op::attributesName() {
+    return EnumStrings<Aidge::Transpose_Op::Attr>::data;
+}
+
+#undef LIST_TRANSPOSE_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index d220807d6cd4ea2c57c152c9e8351bc48211d06e..fe85f9d5e999ab2e6b6a0ae65f3d8ef43cdea0b3 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -22,9 +22,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
@@ -50,39 +48,37 @@ public:
      */
     void forward() override;
 };
+} // namespace Aidge
 
+#define LIST_UNFOLD_ATTR(X)  \
+    X(StrideDims, "stride_dims", sizeArr_t<DIM>),  \
+    X(DilationDims, "dilation_dims", sizeArr_t<DIM>),  \
+    X(KernelDims, "kernel_dims", sizeArr_t<DIM>)
+
+namespace Aidge {
 /**
  * @enum UnfoldAttr
- * @brief Enumeration of attributes specific to the Unfold operator.
+ * @brief Enumeration for the attributes of the Unfold operation.
+ *
+ * - StrideDims: Step sizes in each dimension during the unfold operation.
+ * - DilationDims: Spacing between elements in the kernel during the unfold.
+ * - KernelDims: Size of the kernel or filter applied during the unfold.
  */
 enum class UnfoldAttr {
-    /**
-     * @brief Stride dimensions for the unfolding operation.
-     */
-    StrideDims,
-
-    /**
-     * @brief Dilation dimensions for the unfolding operation.
-     */
-    DilationDims,
-
-    /**
-     * @brief Kernel dimensions for the unfolding operation.
-     */
-    KernelDims
+    GENERATE_LIST_ATTR_ENUM(LIST_UNFOLD_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-    /**
-     * @brief EnumStrings specialization for UnfoldAttr.
-     */
-    template <>
-    const char* const EnumStrings<Aidge::UnfoldAttr>::data[] = {
-        "stride_dims",
-        "dilation_dims",
-        "kernel_dims"
-    };
+template <>
+struct EnumStrings<Aidge::UnfoldAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::UnfoldAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_UNFOLD_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief Describes the operation of unfolding a tensor into sliding blocks.
@@ -109,10 +105,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<UnfoldAttr,
-                                         std::array<DimSize_t, DIM>,
-                                         std::array<DimSize_t, DIM>,
-                                         std::array<DimSize_t, DIM>>;
+    using Attributes_ = StaticAttributes<UnfoldAttr, GENERATE_LIST_ATTR_TYPE(LIST_UNFOLD_ATTR)>;
     template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -216,7 +209,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::UnfoldAttr>::data;
 	}
 };
@@ -249,5 +242,6 @@ inline std::shared_ptr<Node> Unfold( DimSize_t const (&kernelDims)[DIM],
 
 extern template class Aidge::Unfold_Op<2>;
 
+#undef LIST_UNFOLD_ATTR
 
 #endif /* AIDGE_CORE_OPERATOR_UNFOLD_H_ */
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index a78a986724d4b5ca06f611b82e057d13183c5015..5975ff0578ee89a50e1871b71f77846cb63c9d4d 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -37,21 +37,35 @@ public:
       : OperatorImpl(op, backend) {}
   void forward() override;
 };
+}  // namespace Aidge
 
+#define LIST_UNSQUEEZE_ATTR(X)  \
+    X(Axes, "axes", std::vector<std::int8_t>)
+
+namespace Aidge {
+/**
+ * @enum UnsqueezeAttr
+ * @brief Attributes for the Unsqueeze operation.
+ *
+ * - Axes: A vector of axes to unsqueeze.
+ *   - Values must be within the range [ -a ; a-1 ],
+ *     where `a = input_tensor.nbDim() + dims_to_unsqueeze.size()`.
+ */
 enum class UnsqueezeAttr {
-  /**
-   * @brief vector of axes to unsqueeze.
-   * values must be comprised within
-   * [ -a ; a-1 ]
-   * with a = input_tensor.nbDim() + dims_to_unsqueeze.size()
-   */
-  Axes
+    GENERATE_LIST_ATTR_ENUM(LIST_UNSQUEEZE_ATTR)
 };
-} // namespace Aidge
+}  // namespace Aidge
+
 namespace {
-  template <>
-  const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"axes"};
+template <>
+struct EnumStrings<Aidge::UnsqueezeAttr> {
+    static const char* const data[];
+};
+constexpr const char* const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {
+    GENERATE_LIST_ATTR_STR(LIST_UNSQUEEZE_ATTR)
+};
 }
+
 namespace Aidge {
 /**
  * @brief This operator has as purpose to add a dummy dimension around given
@@ -72,7 +86,7 @@ public:
       Type; // name of the type of the operation (Here "Unsqueeze")
 
 private:
-  using Attributes_ = StaticAttributes<UnsqueezeAttr, std::vector<int8_t>>;
+  using Attributes_ = StaticAttributes<UnsqueezeAttr, GENERATE_LIST_ATTR_TYPE(LIST_UNSQUEEZE_ATTR)>;
   template <UnsqueezeAttr e>
   using attr = typename Attributes_::template attr<e>;
   const std::shared_ptr<Attributes_> mAttributes;
@@ -150,7 +164,7 @@ public:
 	 * @brief Retrieves the names of the attributes for the operator.
 	 * @return A vector containing the attributes name.
 	 */
-	static const char* const* attributesName(){
+	static constexpr const char* const* attributesName(){
 		return EnumStrings<Aidge::UnsqueezeAttr>::data;
 	}
 };
@@ -163,4 +177,6 @@ inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
 }
 } // namespace Aidge
 
+#undef LIST_UNSQUEEZE_ATTR
+
 #endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 0a3f5dc4d0ea00ddb5c8d0b8885269c882f7f705..b0bc6dcef823204bff248164d2b6bc13de9b35ec 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -51,11 +51,14 @@ void matMulToFC(std::shared_ptr<GraphView> graphView);
 size_t removeNode(std::shared_ptr<GraphView> graphView, const std::string& type, bool incProducers = false);
 
 /**
- * @brief Fuses constant => Generic | constantOfShape and transforms it into a Producer
- * @param graph Graph to manipulate
- * @return size_t Number of replacement
+ * @brief Compute the output of any ConstantOfShape with a constant input and
+ * replace it with a Producer.
+ * @details Replace "constant->GenericOperator|ConstantOfShape" with "Producer".
+ * @note Currently, this function only matches the query "Producer->ConstantOfShape".
+ * @param view GraphView to transform.
+ * @return std::size_t Number of replacements.
  */
-size_t removeConstantOfShape(std::shared_ptr<GraphView> graph_view);
+std::size_t foldConstantOfShape(std::shared_ptr<GraphView> view);
 
 /**
  * @brief Remove ``Dropout`` Node.
@@ -139,16 +142,16 @@ void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
 
 /**
  * @brief Tile any :cpp:function:`Aidge::MatMul` operator to several fixed size matrix multiplications.
- * For instance, for a MatMul of size 80x80 and a tiling of 16x16, this will tile 
+ * For instance, for a MatMul of size 80x80 and a tiling of 16x16, this will tile
  * the MatMul operator to 25 (5 by 5) MatMul operators of size 16x16, with Slice
  * operators inserted at the inputs and Concat operators inserted at the outputs.
- * 
- * This is especially useful when matrix multiplication must be mapped to fixed 
- * maximum size hardware TPU (Tensor Processing Unit) or MMA (Matrix Multiplication 
- * Accelerator). This recipe can be combined with the :cpp:function:`Aidge::convToMatMul` recipe in 
- * order to convert convolutions to matrix multiplication beforehand, and 
+ *
+ * This is especially useful when matrix multiplication must be mapped to fixed
+ * maximum size hardware TPU (Tensor Processing Unit) or MMA (Matrix Multiplication
+ * Accelerator). This recipe can be combined with the :cpp:function:`Aidge::convToMatMul` recipe in
+ * order to convert convolutions to matrix multiplication beforehand, and
  * :cpp:function:`Aidge::constantFolding` recipe to fold sliced constant tensors.
- * 
+ *
  * @param matMul MatMul operator to be tiled.
  * @param maxDims Maximum output dimensions of the tiled MatMul operators.
  */
@@ -181,7 +184,7 @@ size_t convToMatMul(std::shared_ptr<GraphView> graph);
 
 /**
  * @brief Adapt a graph to the available kernels of a backend.
- * 
+ *
  * @param graph Graph to manipulate
  */
 void adaptToBackend(std::shared_ptr<GraphView> graph);
@@ -189,18 +192,18 @@ void adaptToBackend(std::shared_ptr<GraphView> graph);
 
 /**
  * @brief Create a GenericOp from an Operator and replace it
- * 
+ *
  * @param node Node which Operator will be changed into a generic Operator
  */
 void toGenericOp(std::shared_ptr<Node> node);
 
 /**
  * @brief The node passed contains an operator which input of index 1 is supposed be be weights of type Int4, Int3, Int2, binary.
- *        This recipie only operates memory transformations on the weight tensor. 
+ *        This recipe only operates memory transformations on the weight tensor.
  *        First, permutes the dimensions to match the dataformat NHWC
- *        Second, compact the last dimension of the weights (Channel dimension) into 8bits 
- * 
- * @param node Node 
+ *        Second, compact the last dimension of the weights (Channel dimension) into 8bits
+ *
+ * @param node Node
  */
 void applyWeightInterleaving(std::shared_ptr<Node> node);
 
diff --git a/include/aidge/utils/Directories.hpp b/include/aidge/utils/Directories.hpp
index 783783946ff5bdae5214cc41f6a1f029fae6e09c..c42280a6d67cfc86c64013b236690bf84f985f66 100644
--- a/include/aidge/utils/Directories.hpp
+++ b/include/aidge/utils/Directories.hpp
@@ -11,11 +11,10 @@
 #ifndef AIDGE_DIRECTORIES_H_
 #define AIDGE_DIRECTORIES_H_
 
-#include <algorithm>
+#include <algorithm>  // std::replace_if
 #include <errno.h>
 #include <string>
 // #include <string_view> available in c++-17
-#include <vector>
 
 #include <fmt/core.h>
 #include <fmt/format.h>
diff --git a/include/aidge/utils/FileManagement.hpp b/include/aidge/utils/FileManagement.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..8158fbf19f9e6c62ee5cc967c4a2bb03ba09d0a2
--- /dev/null
+++ b/include/aidge/utils/FileManagement.hpp
@@ -0,0 +1,33 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_FILEMANAGEMENT_H_
+#define AIDGE_FILEMANAGEMENT_H_
+
+#include <cstdio>     // std::fclose, std::fopen
+#include <memory>
+#include <string>
+
+namespace Aidge {
+struct FileDeleter {
+    void operator()(FILE* fp) const {
+        if (fp) {
+            std::fclose(fp);
+        }
+    }
+};
+
+inline std::unique_ptr<FILE, FileDeleter> createFile(const std::string& fileName, const char* accessibility = "w") {
+    return std::unique_ptr<FILE, FileDeleter>(std::fopen(fileName.c_str(), accessibility));
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_FILEMANAGEMENT_H_ */
diff --git a/include/aidge/utils/Types.h b/include/aidge/utils/Types.h
index b601df1cb8f8fa81cd2339e7eb393f7297e63499..c6b8bd4b26fc26a11d753581964a8b2761dcc82f 100644
--- a/include/aidge/utils/Types.h
+++ b/include/aidge/utils/Types.h
@@ -13,10 +13,11 @@
 #ifndef AIDGE_TYPES_H_
 #define AIDGE_TYPES_H_
 
-#include <limits>
-#include <type_traits>
+#include <array>
 #include <cstddef>
 #include <cstdint>
+#include <limits>
+#include <type_traits>
 
 namespace Aidge
 {
@@ -60,6 +60,8 @@ constexpr IOIndex_t gk_IOMaxIndex = std::numeric_limits<IOIndex_t>::max() - 1;
 // using IOIndex_t = std::uint16_t;
 // constexpr IOIndex_t gk_IOMaxNb = std::numeric_limits<IOIndex_t>::max();
 
+// type used by StaticAttribute MACROs
+template <DimSize_t DIM> using sizeArr_t = std::array<DimSize_t, DIM>;
 
 } // namespace Aidge
 
diff --git a/python_binding/data/pybind_DataFormat.cpp b/python_binding/data/pybind_DataFormat.cpp
index a63df321c3298284df7de8fd2c3eb0fc0cecae24..5308fb3023e15a74f0dc5f674917d7ae65cbb52f 100644
--- a/python_binding/data/pybind_DataFormat.cpp
+++ b/python_binding/data/pybind_DataFormat.cpp
@@ -66,7 +66,7 @@ void bindEnum(py::module& m, const std::string& name) {
 void init_DataFormat(py::module& m) {
     bindEnum<DataFormat>(m, "dformat");
     m.def("format_as", (const char* (*)(DataFormat)) &format_as, py::arg("df"));
-    m.def("get_data_format_transpose", &getDataFormatTranspose, py::arg("src"), py::arg("dst"));
+    m.def("get_permutation_mapping", &getPermutationMapping, py::arg("src"), py::arg("dst"));
 }
 
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Abs.cpp b/python_binding/operator/pybind_Abs.cpp
index e8ae1c26e5c454cb130c55abd3a77b3ca7d5b7ff..8df1bfd13bb8720e84e5595cca2c6419f2737293 100644
--- a/python_binding/operator/pybind_Abs.cpp
+++ b/python_binding/operator/pybind_Abs.cpp
@@ -9,9 +9,10 @@
  *
  ********************************************************************************/
 
+#include <memory>
+
 #include <pybind11/pybind11.h>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Abs.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index cd85c73d3fd1a4181f7042bf0495f09046817fc4..8145000702fd1db680b7c5b8fcc6d90ff67c03ce 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -15,7 +15,6 @@
 
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp
index 13c0f9085c0f2cb0c5e278e23a7005a5aa3470f2..bd3366ef843efde47ef03f86f23e8a2b4df15a01 100644
--- a/python_binding/operator/pybind_And.cpp
+++ b/python_binding/operator/pybind_And.cpp
@@ -9,9 +9,10 @@
  *
  ********************************************************************************/
 
+#include <memory>
+
 #include <pybind11/pybind11.h>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/And.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 199ef813481e324c3dbbbfbe6db2dad125a213d1..4bcb94c4a78d96828b010e2448bd52f3d2486384 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -10,10 +10,10 @@
  ********************************************************************************/
 
 #include <string>
+#include <vector>
 
 #include <pybind11/pybind11.h>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 6afeb42a71787146b773fd2e460da4db3228c1c1..1c1f027dc56627a1fdb5292b5ec94197ad9c2d29 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -24,11 +24,11 @@ void init_Gather(py::module& m) {
     py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor>(m, "GatherOp", py::multiple_inheritance(),
         R"mydelimiter(
         Initialize a Gather operator, which extracts elements from a tensor at specified indices along a given axis.
-        
-        This operation selects values along the specified axis based on the provided indices, which can be 
-        a 1D or multidimensional tensor. The resulting tensor will have the same shape as the input tensor 
+
+        This operation selects values along the specified axis based on the provided indices, which can be
+        a 1D or multidimensional tensor. The resulting tensor will have the same shape as the input tensor
         except along the given axis, where the size will be determined by the indices.
-        
+
         :param axis : Axis along which to gather the elements.
         :type axis : int
         :param indices : Indices to gather along the axis.
@@ -48,7 +48,7 @@ void init_Gather(py::module& m) {
 		.def_static("attributes_name", []() {
 			std::vector<std::string> result;
 			auto attributes = Gather_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<GatherAttr>::data); ++i) {
+			for (size_t i = 0; i < size(EnumStrings<Gather_Op::Attr>::data); ++i) {
 				result.emplace_back(attributes[i]);
 			}
 			return result;
@@ -57,14 +57,14 @@ void init_Gather(py::module& m) {
 
     declare_registrable<Gather_Op>(m, "GatherOp");
 
-    m.def("Gather", &Gather, 
+    m.def("Gather", &Gather,
           py::arg("axis") = 0,
           py::arg("indices") = std::vector<std::int64_t>(),
           py::arg("gathered_shape") = std::vector<std::size_t>(),
           py::arg("name") = "",
           R"mydelimiter(
           Initialize a node containing a Gather operator that extracts elements from a tensor along a specified axis.
-          
+
           This operation selects values along the specified axis using the provided indices. The resulting tensor
           will have the same shape as the input tensor except along the given axis, where the size will be determined
           by the indices.
diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp
index b8d7f1d802701933a7c1b5be9dcc7d9163f770a4..078b766a09306ea2808827243ea10f119d281604 100644
--- a/python_binding/operator/pybind_Heaviside.cpp
+++ b/python_binding/operator/pybind_Heaviside.cpp
@@ -24,9 +24,9 @@ void init_Heaviside(py::module &m) {
         "HeavisideOp",
         py::multiple_inheritance(),
          R"mydelimiter(
-          Initialize an Heaviside node. This node will compute a heaviside step function 
+          Initialize an Heaviside node. This node will compute a heaviside step function
           on each element of the input tensor.
-          heaviside(input, values) = { 0  if input < 0 
+          heaviside(input, values) = { 0  if input < 0
                                      { values if input == 0
                                      { 1 if input > 0
 
@@ -41,7 +41,7 @@ void init_Heaviside(py::module &m) {
 		.def_static("attributes_name", []() {
 			std::vector<std::string> result;
 			auto attributes = Heaviside_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<HeavisideAttr>::data); ++i) {
+			for (size_t i = 0; i < size(EnumStrings<Heaviside_Op::Attr>::data); ++i) {
 				result.emplace_back(attributes[i]);
 			}
 			return result;
@@ -51,9 +51,9 @@ void init_Heaviside(py::module &m) {
     declare_registrable<Heaviside_Op>(m, "HeavisideOp");
     m.def("Heaviside", &Heaviside, py::arg("value"), py::arg("name") = "",
             R"mydelimiter(
-          Initialize an Heaviside node. This node will compute a heaviside step function 
+          Initialize an Heaviside node. This node will compute a heaviside step function
           on each element of the input tensor.
-          heaviside(input, values) = { 0  if input < 0 
+          heaviside(input, values) = { 0  if input < 0
                                      { values if input == 0
                                      { 1 if input > 0
 
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index ab81052d21e477a64a9f90766504741f4386730c..8bc120c8aa3e585a3e792ab3337fc9c602f6afe9 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -34,7 +34,7 @@ void init_LeakyReLU(py::module& m) {
 		.def_static("attributes_name", []() {
 			std::vector<std::string> result;
 			auto attributes = LeakyReLU_Op::attributesName();
-			for (size_t i = 0; i < size(EnumStrings<LeakyReLUAttr>::data); ++i) {
+			for (size_t i = 0; i < size(EnumStrings<LeakyReLU_Op::Attr>::data); ++i) {
 				result.emplace_back(attributes[i]);
 			}
 			return result;
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
index f583602c95692ff6e6084ba510f109e1f7ba65f9..ed87f68c77d196da91f31bb8730f93c26da5938f 100644
--- a/python_binding/operator/pybind_Memorize.cpp
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -27,7 +27,7 @@ void init_Memorize(py::module& m) {
 		.def_static("attributes_name", []() {
 			std::vector<std::string> result;
 			auto attributes = Memorize_Op::attributesName();
-			for (size_t i = 0;i < size(EnumStrings<MemorizeAttr>::data); ++i) {
+			for (size_t i = 0;i < size(EnumStrings<Memorize_Op::Attr>::data); ++i) {
 				result.emplace_back(attributes[i]);
 			}
 			return result;
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 8222a6a03a86dfa11862814a7038b92e60ba88d2..41ef91ed9383f72bc9b3bb3971dedbec0256c7b0 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -9,9 +9,10 @@
  *
  ********************************************************************************/
 
+#include <memory>
+
 #include <pybind11/pybind11.h>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index d29f6bfe7aa2f5f44bbc407923dce5bc5968fcc3..a97c3795b305bf4cfc584f50d171c9f435809a7a 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -9,17 +9,16 @@
  *
  ********************************************************************************/
 
-#include <array>
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include <string>
-#include <vector>
+#include <cstdint>  // std::int32_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
 
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/ReduceMean.hpp"
-#include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
@@ -30,13 +29,13 @@ void declare_ReduceMeanOp(py::module &m) {
     m, pyClassName.c_str(), py::multiple_inheritance(),
       R"mydelimiter(
 		Initialize a ReduceMean operator.
-			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axes: List[int]
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param noop_with_empty_axes: If True, the operator just copies the input, 
+			:param noop_with_empty_axes: If True, the operator just copies the input,
       if False, the operatpr reduces all the dimensions.
 			:type noop_with_empty_axes: bool
 		)mydelimiter")
@@ -46,7 +45,7 @@ void declare_ReduceMeanOp(py::module &m) {
 	.def_static("attributes_name", []() {
 		std::vector<std::string> result;
 		auto attributes = ReduceMean_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<ReduceMeanAttr>::data); ++i) {
+		for (size_t i = 0; i < size(EnumStrings<ReduceMean_Op::Attr>::data); ++i) {
 			result.emplace_back(attributes[i]);
 		}
 		return result;
@@ -68,13 +67,13 @@ void declare_ReduceMeanOp(py::module &m) {
        py::arg("name") = "",
 	   R"mydelimiter(
         Initialize a node containing a ReduceMean operator.
-			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axes: List[int]
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param noop_with_empty_axes: If True, the operator just copies the input, 
+			:param noop_with_empty_axes: If True, the operator just copies the input,
       if False, the operatpr reduces all the dimensions.
 			:type noop_with_empty_axes: bool
 			:param name : name of the node.
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
index f139f2e7b4ef1484430b814023296149734fd54a..7517c62d2082215a25a3f632a5bc59555319fa57 100644
--- a/python_binding/operator/pybind_ReduceSum.cpp
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -9,17 +9,16 @@
  *
  ********************************************************************************/
 
-#include <array>
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
+#include <cstdint>  // std::int32_t
+#include <memory>
 #include <string>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/ReduceSum.hpp"
-#include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
@@ -30,13 +29,13 @@ void init_ReduceSum(py::module &m) {
     m, pyClassName.c_str(), py::multiple_inheritance(),
       R"mydelimiter(
 		Initialize a ReduceMean operator.
-			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axes: List[int]
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param noop_with_empty_axes: If True, the operator just copies the input, 
+			:param noop_with_empty_axes: If True, the operator just copies the input,
       if False, the operatpr reduces all the dimensions.
 			:type noop_with_empty_axes: bool
 		)mydelimiter")
@@ -47,7 +46,7 @@ void init_ReduceSum(py::module &m) {
 	.def_static("attributes_name", []() {
 		std::vector<std::string> result;
 		auto attributes = ReduceSum_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<ReduceSumAttr>::data); ++i) {
+		for (size_t i = 0; i < size(EnumStrings<ReduceSum_Op::Attr>::data); ++i) {
 			result.emplace_back(attributes[i]);
 		}
 		return result;
@@ -66,13 +65,13 @@ void init_ReduceSum(py::module &m) {
        py::arg("name") = "",
 	   R"mydelimiter(
         Initialize a node containing a ReduceMean operator.
-			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axes: List[int]
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param noop_with_empty_axes: If True, the operator just copies the input, 
+			:param noop_with_empty_axes: If True, the operator just copies the input,
       if False, the operatpr reduces all the dimensions.
 			:type noop_with_empty_axes: bool
 			:param name : name of the node.
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 1882aa4c439b88413a3d9e94d4df0605bfec87a1..75bedca305eadb8624552ef7e95f10a547abeab7 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -41,7 +41,7 @@ void declare_Transpose(py::module &m) {
 	.def_static("attributes_name", []() {
 		std::vector<std::string> result;
 		auto attributes = Transpose_Op::attributesName();
-		for (size_t i = 0; i < size(EnumStrings<TransposeAttr>::data); ++i) {
+		for (size_t i = 0; i < size(EnumStrings<Transpose_Op::Attr>::data); ++i) {
 			result.emplace_back(attributes[i]);
 		}
 		return result;
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index 21478a5b14d609801f232b20cda25e7e1c0d9475..500367cb8f58bbfbf76394b0c83cd8ff848ff8cb 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -79,7 +79,7 @@ void init_Recipes(py::module &m)
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
-  m.def("remove_constantOfShape", static_cast<size_t(*)(std::shared_ptr<GraphView>)>(removeConstantOfShape), py::arg("graph_view"), R"mydelimiter(
+  m.def("fold_constantOfShape", static_cast<size_t(*)(std::shared_ptr<GraphView>)>(foldConstantOfShape), py::arg("graph_view"), R"mydelimiter(
     Fuses constant => Generic | constantOfShape and transforms it into a Producer
 
     :param graph_view: Graph view on which we want to apply the recipe.
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 08f5fe671c7502a6c5fe01dbdfb7ae4c9b95ac81..480e751807d85c4f74039e35c284f13f03013650 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -196,7 +196,7 @@ bool Aidge::OperatorImpl::checkIOSpec(const ImplSpec::IOSpec& required, const Im
         && spec.format != DataFormat::Any
         && required.format != spec.format)
     {
-        const auto transpose = getDataFormatTranspose(required.format, spec.format);
+        const auto transpose = getPermutationMapping(required.format, spec.format);
         std::vector<size_t> identity(transpose.size());
         std::iota(std::begin(identity), std::end(identity), 0);
 
@@ -261,7 +261,7 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
             && IOSpec.format != DataFormat::Any
             && requiredIOSpec.format != IOSpec.format)
         {
-            const auto transpose = getDataFormatTranspose(requiredIOSpec.format, IOSpec.format);
+            const auto transpose = getPermutationMapping(requiredIOSpec.format, IOSpec.format);
             auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
             transposeOp->getOperator()->setDataFormat(IOSpec.format);
             transposeOp->getOperator()->setDataType(requiredIOSpec.type);
@@ -315,7 +315,7 @@ std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec&
             && IOSpec.format != DataFormat::Any
             && requiredIOSpec.format != IOSpec.format)
         {
-            const auto transpose = getDataFormatTranspose(IOSpec.format, requiredIOSpec.format);
+            const auto transpose = getPermutationMapping(IOSpec.format, requiredIOSpec.format);
             auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
             transposeOp->getOperator()->setDataFormat(requiredIOSpec.format);
             transposeOp->getOperator()->setDataType(requiredIOSpec.type);
diff --git a/src/data/DataFormat.cpp b/src/data/DataFormat.cpp
index 466da86c469d89e5f1f4fc0895223513783b801c..8b7460b3d42de2e2ddb60a6332405141f87b297c 100644
--- a/src/data/DataFormat.cpp
+++ b/src/data/DataFormat.cpp
@@ -9,34 +9,73 @@
  *
  ********************************************************************************/
 
+#include <array>
+#include <cstddef>  // std::size_t
+
 #include "aidge/data/DataFormat.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+
+namespace Aidge {
+
+/**
+ * @brief Get the permutation array mapping from the default (NCHW) ordering to a given data format.
+ *
+ * @param dformat The target data format.
+ * @return const DataFormatTranspose& The permutation array (0-indexed).
+ *
+ * @note For DataFormat::Default and DataFormat::Any, an empty permutation is returned.
+ */
+static const DataFormatTranspose& getPermutationFromNCHW(DataFormat dformat) {
+    constexpr static const std::array<DataFormatTranspose, NB_DFORMAT> permutationFromNCHW = {{
+#define X(EnumName, Str, NumDims, Perm, Desc) Perm
+        LIST_DATAFORMAT_ATTR(X)
+#undef X
+    }};
+    return permutationFromNCHW[static_cast<std::size_t>(dformat)];
+}
+
+
+/**
+ * @brief Retrieve the number of dimensions for a given data format.
+ *
+ * @param dformat The data format.
+ * @return constexpr std::size_t The number of dimensions.
+ */
+static std::size_t getNbDimensions(DataFormat dformat) {
+    constexpr static const std::array<std::size_t, NB_DFORMAT> nbDimensions = {
+#define X(name, str, nb, arr, desc) nb
+        LIST_DATAFORMAT_ATTR(X)
+#undef X
+    };
+    return nbDimensions[static_cast<std::size_t>(dformat)];
+}
+
+DataFormatTranspose getPermutationMapping(const DataFormat& src, const DataFormat& dst) {
+    AIDGE_ASSERT((src != DataFormat::Any && dst != DataFormat::Any), "Permutation is not defined for DataFormat::Any");
+    if (src == DataFormat::Default || dst == DataFormat::Default || src == dst) {
+        return {0,1,2,3,4};
+    }
+    const std::size_t nbDims = getNbDimensions(src);
+    AIDGE_ASSERT(nbDims == getNbDimensions(dst), "Incompatible format conversion. Current and new data format must have the same number of dimensions.");
+
+    // Get permutation from default (NCHW) to source and destination.
+    const DataFormatTranspose& nchw_to_src = getPermutationFromNCHW(src);
+    const DataFormatTranspose& nchw_to_dst = getPermutationFromNCHW(dst);
 
-Aidge::DataFormatTranspose Aidge::getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
-    // Permutation array from default format to src format
-    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
-    // Permutation array from default format to dst format
-    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
-    // Compute permutation array from src format to default format:
-    DataFormatTranspose srcFormatToDef{};
-    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
-        if (srcDefToFormat[i] > 0) {
-            srcFormatToDef[srcDefToFormat[i] - 1] = i;
-        }
-        else {
-            srcFormatToDef[i] = i;
-        }
+    // Compute inverse permutation: mapping from source format to default (NCHW).
+    DataFormatTranspose src_to_nchw{};
+    for (std::size_t i = 0; i < nbDims; ++i) {
+        // Since the permutations are 0-indexed, simply invert.
+        src_to_nchw[nchw_to_src[i]] = i;
     }
 
-    // Compute permutation array from src format to dst format:
-    DataFormatTranspose srcToDst{};
-    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
-        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
-            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
-        }
-        else {
-            srcToDst[i] = srcFormatToDef[i];
-        }
+    // Compute mapping from source format to destination format.
+    DataFormatTranspose src_to_dst{};
+    for (std::size_t i = 0; i < nbDims; ++i) {
+        src_to_dst[i] = nchw_to_dst[src_to_nchw[i]];
     }
 
-    return srcToDst;
+    return src_to_dst;
 }
+} // namespace Aidge
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index dd17cd34447ce208a4cd0dd00d2b05a8bee1f590..a789ea4b1228345e5e334016bebbe5d773428908 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -33,6 +33,7 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/Memorize.hpp"
 #include "aidge/utils/Directories.hpp"
+#include "aidge/utils/FileManagement.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
@@ -85,7 +86,7 @@ bool Aidge::GraphView::inView(const std::string& nodeName) const {
 }
 
 void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProducers) const {
-    auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((path + ".mmd").c_str(), "w"), &std::fclose);
+    auto fp = createFile(path + ".mmd", "w");
 
     if (!fp) {
         AIDGE_THROW_OR_ABORT(std::runtime_error,
@@ -262,7 +263,7 @@ void Aidge::GraphView::logOutputs(const std::string& dirName) const {
 
     for (IOIndex_t outIdx = 0; outIdx < nodePtr->nbOutputs(); ++outIdx) {
       const std::string& inputPath = nodePath +"output_" + std::to_string(outIdx) + ".log";
-      auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(inputPath.c_str(), "w"), &std::fclose);
+      auto fp = createFile(inputPath, "w");
       if (!fp) {
         AIDGE_THROW_OR_ABORT(std::runtime_error,
             "Could not create graph view log file: {}", inputPath);
@@ -1123,7 +1124,7 @@ void Aidge::GraphView::insertParent(NodePtr childNode,
  * | >1 node, 1 input    |     trivial      |      trivial      |     broadcast      |    broadcast       |
  * | 1 node, >1 inputs   |   (take first)   |   (take first)    |     same order     |       X            |
  * | >1 node, >1 inputs  |       X          |        X          |         X          |       X            |
- * 
+ *
  * Outputs conditions:
  * |  old    \     new   | 1 node, 1 output | >1 node, 1 output | 1 node, >1 outputs | >1 node, >1 outputs |
  * | ------------------- | ---------------- | ----------------- | ------------------ | ------------------- |
@@ -1131,7 +1132,7 @@ void Aidge::GraphView::insertParent(NodePtr childNode,
  * | >1 node, 1 output   |     trivial      |      trivial      |     take first     |       X             |
  * | 1 node, >1 outputs  |   (take first)   |   (take first)    |     same order     |       X             |
  * | >1 node, >1 outputs |       X          |        X          |         X          |       X             |
- * 
+ *
  * Only the X cases cannot possibly be resolved deterministically with sets of node.
  * These cases are therefore forbidden for the set-based `replace()` interface.
  * The remaining cases are handled by the GraphView-based `replace()` interface.
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 1c8585d1d1f26341724486a16d0678d92f759146..0dec30c2f2f2ffcb0f83740c863d46d7169d2f06 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -173,6 +173,12 @@ std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::No
     return res;
 }
 
+std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t> Aidge::Node::input(const Aidge::IOIndex_t inID) const {
+    // nbInputs() is already < gk_IODefaultIndex, so checking inID < nbInputs() suffices
+    AIDGE_ASSERT((inID < nbInputs()), "Input index out of bound.");
+    return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
+}
+
 // void Aidge::Node::setInput(const Aidge::IOIndex_t idx, const std::shared_ptr<Aidge::Tensor>
 // tensor) {
 //     assert(((idx != gk_IODefaultIndex) && (idx < nbInputs())) && "Parent index out of bound.");
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 2077cab52f613780e77bba80efacb41d06a7f3cf..d69aad616bcdaedd7ffa9cdb04d02802bb998f5a 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -19,6 +19,7 @@
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -40,13 +41,13 @@ Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    if (!inputsAssociated()) 
+    if (!inputsAssociated())
         return false;
     // first check weight since it defines inChannels and outChannels
     if(getInput(0)->dataFormat() == Aidge::DataFormat::NHWC){
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                 (getInput(0)->template dims<DIM+2>()[DIM+1] == inChannels()),
-                "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), fmt::join(std::vector<std::string>(DIM, "x"), ", "), inChannels());
+                "Wrong input channel size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), fmt::join(std::vector<std::string>(DIM, "x"), ", "), inChannels());
     }
     else{ //For dataFormat in NCHW or Default Format
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
@@ -65,31 +66,31 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
     std::array<DimSize_t, DIM + 2> outputDims{};
 
-    
+
     unsigned int in_dims_index = (getInput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
     unsigned int out_dims_index = (getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
 
-    for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size(); ++dim) {
-        const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+    for (std::size_t dim = 0; dim < mAttributes->template getAttr<Attr::KernelDims>().size(); ++dim) {
+        const DimSize_t kernelExtent = mAttributes->template getAttr<Attr::DilationDims>()[dim] *
+                                    (mAttributes->template getAttr<Attr::KernelDims>()[dim] - 1) +
                                     1;
-        
+
         outputDims[dim + out_dims_index] = 1 + static_cast<DimSize_t>(
             floor(static_cast<float>(inputDims[dim + in_dims_index] - kernelExtent) /
-                static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim]))
+                static_cast<float>(mAttributes->template getAttr<Attr::StrideDims>()[dim]))
         );
     }
 
-    if(getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC) 
+    if(getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC)
         outputDims[DIM+1] = outChannels();
-    else 
+    else
         outputDims[1] = outChannels();
 
     outputDims[0] = inputDims[0];
     mOutputs[0]->resize(outputDims);
     return true;
-    
-    
+
+
 }
 
 template <Aidge::DimIdx_t DIM>
@@ -122,18 +123,18 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<Attr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (mAttributes->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * mAttributes->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<Attr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<Attr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<Attr::StrideDims>()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         // same output value, every input channel is used
         std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(mAttributes->template getAttr<ConvAttr::KernelDims>()[i]);
+            weightDims.push_back(mAttributes->template getAttr<Attr::KernelDims>()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
@@ -173,6 +174,28 @@ void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+Aidge::DimSize_t Aidge::Conv_Op<DIM>::inChannels() const {
+    if (!getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
+    }
+
+    // check format
+    if(getInput(1)->dataFormat()==Aidge::DataFormat::NHWC)
+        return getInput(1)->template dims<DIM+2>()[DIM+1];
+    // default format is NCHW
+    return getInput(1)->template dims<DIM+2>()[1];
+}
+
+template <Aidge::DimIdx_t DIM>
+Aidge::DimSize_t Aidge::Conv_Op<DIM>::outChannels() const {
+    if (!getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
+    }
+    // first weight dimension holds Cout for both NCHW (Cout,Cin,H,W) and NHWC (Cout,H,W,Cin) data formats
+    return getInput(1)->template dims<DIM+2>()[0];
+}
+
 template <Aidge::DimIdx_t DIM>
 std::set<std::string> Aidge::Conv_Op<DIM>::getAvailableBackends() const {
     return Registrar<Conv_Op<DIM>>::getKeys();
@@ -203,3 +226,20 @@ std::shared_ptr<Aidge::Node> Aidge::Conv(Aidge::DimSize_t inChannels,
 
 template std::shared_ptr<Aidge::Node> Aidge::Conv<1>(Aidge::DimSize_t, Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
 template std::shared_ptr<Aidge::Node> Aidge::Conv<2>(Aidge::DimSize_t, Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+
+template <Aidge::DimSize_t DIM>
+std::shared_ptr<Aidge::Node> Aidge::Conv(
+    Aidge::DimSize_t inChannels,
+    Aidge::DimSize_t outChannels,
+    Aidge::DimSize_t const (&kernelDims)[DIM],
+    const std::string& name,
+    const std::array<Aidge::DimSize_t, DIM> &strideDims,
+    const std::array<Aidge::DimSize_t, DIM> &dilationDims,
+    bool noBias)
+{
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
+    return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Conv<1>(Aidge::DimSize_t, Aidge::DimSize_t, Aidge::DimSize_t const (&)[1], const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Conv<2>(Aidge::DimSize_t, Aidge::DimSize_t, Aidge::DimSize_t const (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
\ No newline at end of file
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index e0990437a06d5b9fb72cf1909d78f6094120bf80..10e20046f0565d098275141e90e920ce78725e0f 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -28,9 +28,9 @@ Aidge::Gather_Op::Gather_Op(std::int8_t axis,
               const std::vector<Aidge::DimSize_t>& gatheredShape)
     : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
     mAttributes(std::make_shared<Attributes_>(
-        attr<GatherAttr::Axis>(axis),
-        attr<GatherAttr::Indices>(indices),
-        attr<GatherAttr::GatheredShape>(gatheredShape)))
+        attr<Attr::Axis>(axis),
+        attr<Attr::Indices>(indices),
+        attr<Attr::GatheredShape>(gatheredShape)))
 {
     mImpl = std::make_shared<Gather_OpImpl>(*this);
 }
diff --git a/src/operator/Heaviside.cpp b/src/operator/Heaviside.cpp
index 9ecb3b436d8312ef479d6bc0592cfe372235fa25..6555a530bd02edf6f1823469297d289fb4b57b87 100644
--- a/src/operator/Heaviside.cpp
+++ b/src/operator/Heaviside.cpp
@@ -30,7 +30,7 @@ const std::string Heaviside_Op::Type = "Heaviside";
 Heaviside_Op::Heaviside_Op(float value)
     : OperatorTensor(Type, {InputCategory::Data}, 1),
       mAttributes(
-          std::make_shared<Attributes_>(attr<HeavisideAttr::Value>(value))) {}
+          std::make_shared<Attributes_>(attr<Attr::Value>(value))) {}
 
 Heaviside_Op::Heaviside_Op(const Heaviside_Op &op)
     : OperatorTensor(op), mAttributes(op.mAttributes) {
diff --git a/src/operator/LRN.cpp b/src/operator/LRN.cpp
index c5ce243bd6a48ae4b1ce8461924b498c804b53e6..5b7d663e78cf92047e3ed47212f2a27d42a8de49 100644
--- a/src/operator/LRN.cpp
+++ b/src/operator/LRN.cpp
@@ -23,10 +23,10 @@ const std::string Aidge::LRN_Op::Type = "LRN";
 Aidge::LRN_Op::LRN_Op(std::int32_t size)
     : OperatorTensor(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
-        attr<LRNAttr::Alpha>(0.0001),
-        attr<LRNAttr::Beta>(0.75),
-        attr<LRNAttr::Bias>(1.0),
-        attr<LRNAttr::Size>(size)))
+        attr<Attr::Alpha>(0.0001),
+        attr<Attr::Beta>(0.75),
+        attr<Attr::Bias>(1.0),
+        attr<Attr::Size>(size)))
 {}
 
 Aidge::LRN_Op::LRN_Op(const Aidge::LRN_Op& op)
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index afd8e00cc07b9ecaf28fcb7d7b28fa3422446429..b0bd167dd28a10b22516259b5087a834bd6afeda 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -29,8 +29,8 @@ Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM>
                             bool ceil_mode)
     : OperatorTensor(Type, {InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
-    attr<MaxPoolingAttr::StrideDims>(stride_dims),
     attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+    attr<MaxPoolingAttr::StrideDims>(stride_dims),
     attr<MaxPoolingAttr::Dilations>(dilations),
     attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
 {}
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index c4f0bc4bf7267d24264652d5ed6b0d50935e1aa4..76d3ddd22e113f087f5afc0ebb358edce0b0fc32 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -78,9 +78,9 @@ const std::string Aidge::Memorize_Op::Type = "Memorize";
 Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
     : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 2),
         mAttributes(std::make_shared<Attributes_>(
-                    attr<MemorizeAttr::ScheduleStep>(0),
-                    attr<MemorizeAttr::ForwardStep>(0),
-                    attr<MemorizeAttr::EndStep>(endStep)))
+                    attr<Attr::ScheduleStep>(0),
+                    attr<Attr::ForwardStep>(0),
+                    attr<Attr::EndStep>(endStep)))
 {
     // The input idx 0 is a back edge for Memorize where inputs are (back, init)
     setBackEdges({0});
@@ -106,8 +106,8 @@ std::shared_ptr<Aidge::Operator> Aidge::Memorize_Op::clone() const {
 
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    ++mAttributes->template getAttr<MemorizeAttr::ScheduleStep>();
-    mAttributes->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+    ++scheduleStep();
+    forwardStep() = 0;
 }
 
 bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
@@ -151,8 +151,8 @@ void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
 
 void Aidge::Memorize_Op::forward() {
     OperatorTensor::forward();
-    ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
-    mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
+    ++forwardStep();
+    scheduleStep() = 0;
 }
 
 std::set<std::string> Aidge::Memorize_Op::getAvailableBackends() const {
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index a7d2a1da8e67d4b10c5fb49b6eeb491c0942a2f3..192cc9f5e272f8bb0f718e1dfb2a1100c05a72d1 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -19,6 +19,8 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
+
 
 Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph, const std::vector<InputCategory>& forcedInputsCategory)
     : OperatorTensor(type, [graph, forcedInputsCategory]() {
@@ -113,8 +115,7 @@ std::string Aidge::MetaOperator_Op::backend() const noexcept {
 void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
     if (Registrar<MetaOperator_Op>::exists({name, type()})) {
         // A custom implementation exists for this meta operator
-        mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
-
+        SET_IMPL_MACRO(MetaOperator_Op, *this, {name, type()});
         // Set backend for in/out tensor of the MetaOp
         for(auto i: mGraph->inputNodes()){
             auto op_i = std::static_pointer_cast<OperatorTensor>(i->getOperator());
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 9af4586886fc98c50862672392d3b704e6bc1d0c..0beaf91b3a31ee9347a91ae4b77287ac0abcdc20 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -32,7 +32,7 @@ Aidge::Producer_Op::Producer_Op(
             bool constant)
     : OperatorTensor(Type, {}, 1),
         mAttributes(std::make_shared<Attributes_>(
-        attr<ProdAttr::Constant>(constant)))
+        attr<ProducerAttr::Constant>(constant)))
 {
     mOutputs[0]->resize(dims);
     mImpl = std::make_shared<OperatorImpl>(*this);
@@ -41,7 +41,7 @@ Aidge::Producer_Op::Producer_Op(
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
     : OperatorTensor(Type, {}, 1),
       mAttributes(std::make_shared<Attributes_>(
-        attr<ProdAttr::Constant>(constant)))
+        attr<ProducerAttr::Constant>(constant)))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
     if (mOutputs[0] && mOutputs[0]->hasImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
@@ -100,7 +100,7 @@ void Aidge::Producer_Op::forward() {
 }
 
 void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const {
-    if (mAttributes->template getAttr<ProdAttr::Constant>()) {
+    if (mAttributes->template getAttr<ProducerAttr::Constant>()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
     }
     OperatorTensor::setOutput(outputIdx, data);
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 7935edb050824af92a8f130f975aa09e41ca875f..dfaa75a4883ce2c9dcc77f89dc9f970c3f1ed2cd 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -30,9 +30,9 @@ const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
     : OperatorTensor(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
-        attr<ReduceMeanAttr::Axes>(axes),
-        attr<ReduceMeanAttr::KeepDims>(keep_dims),
-        attr<ReduceMeanAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
+        attr<Attr::Axes>(axes),
+        attr<Attr::KeepDims>(keep_dims),
+        attr<Attr::NoopWithEmptyAxes>(noop_with_empty_axes)))
 {}
 
 Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
@@ -53,32 +53,32 @@ std::shared_ptr<Aidge::Operator> Aidge::ReduceMean_Op::clone() const {
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // make Axes attribute positive
-        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceMeanAttr::Axes>();
-        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+        std::vector<std::int32_t>& reduced_axes = axes();
+        std::for_each(reduced_axes.begin(), reduced_axes.end(), [&] (std::int32_t& val) {
             if (val < 0)
                 val+=static_cast<std::int32_t>(getInput(0)->nbDims());
         });
-        std::sort(axes.begin(), axes.end());
+        std::sort(reduced_axes.begin(), reduced_axes.end());
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-        if (axes.empty())
+        if (reduced_axes.empty())
         {
-            if(mAttributes->template getAttr<ReduceMeanAttr::NoopWithEmptyAxes>()) {
+            if(noopWithEmptyAxes()) {
                 mOutputs[0]->resize(outDims);
                 return true;
             }
             // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
-            axes.resize(getInput(0)->nbDims());
-            std::iota(axes.begin(), axes.end(), 0);
+            reduced_axes.resize(getInput(0)->nbDims());
+            std::iota(reduced_axes.begin(), reduced_axes.end(), 0);
         }
 
-        if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
-            std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
+        if (keepDims()) {
+            std::for_each(reduced_axes.cbegin(), reduced_axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
         else {
-            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+            for (auto it = reduced_axes.crbegin(); it != reduced_axes.crend(); ++it)
                 outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
         }
 
diff --git a/src/operator/ReduceSum.cpp b/src/operator/ReduceSum.cpp
index 0786f53c6b761e5cd9020352a2ecb92469a609d7..73b6722e15ebc7a32cbb502e83d5779558c1cac7 100644
--- a/src/operator/ReduceSum.cpp
+++ b/src/operator/ReduceSum.cpp
@@ -30,32 +30,32 @@ const std::string Aidge::ReduceSum_Op::Type = "ReduceSum";
 bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // make Axes attribute positive
-        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceSumAttr::Axes>();
-        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+        std::vector<std::int32_t>& reduced_axes = axes();
+        std::for_each(reduced_axes.begin(), reduced_axes.end(), [&] (std::int32_t& val) {
             if (val < 0)
                 val+=static_cast<std::int32_t>(getInput(0)->nbDims());
         });
-        std::sort(axes.begin(), axes.end());
+        std::sort(reduced_axes.begin(), reduced_axes.end());
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-        if (axes.empty())
+        if (reduced_axes.empty())
         {
-            if(mAttributes->template getAttr<ReduceSumAttr::NoopWithEmptyAxes>()) {
+            if(noopWithEmptyAxes()) {
                 mOutputs[0]->resize(outDims);
                 return true;
             }
             // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
-            axes.resize(getInput(0)->nbDims());
-            std::iota(axes.begin(), axes.end(), 0);
+            reduced_axes.resize(getInput(0)->nbDims());
+            std::iota(reduced_axes.begin(), reduced_axes.end(), 0);
         }
 
-        if (mAttributes->template getAttr<ReduceSumAttr::KeepDims>()) {
-            std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
+        if (keepDims()) {
+            std::for_each(reduced_axes.cbegin(), reduced_axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
         else {
-            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+            for (auto it = reduced_axes.crbegin(); it != reduced_axes.crend(); ++it)
                 outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
         }
 
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index a938f470ded47d65a9b15b93ca66dfe186d61e9f..9f8cd163922d5dec22614d213a6f4145b14b9aa1 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -61,10 +61,21 @@ void StackOpImpl::forward() {
         op.forwardStep() * op.getInput(0)->size());
 }
 
+void StackOpImpl::backward() {
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    AIDGE_ASSERT(op.backwardStep() > 0, "Stack operator has not been run forward");
+
+    auto inputGrad = op.getInput(0)->grad();
+    auto outputGrad = op.getOutput(0)->grad();
+
+    *inputGrad = outputGrad->extract({op.backwardStep() -1 }).clone();
+}
+
 StackOp::StackOp(std::uint32_t maxElements)
     : OperatorTensor(s_type, {InputCategory::Data, InputCategory::OptionalData}, 1),
       mAttributes(std::make_shared<Attributes_>(
           attr<StackAttr::MaxElements>(maxElements),
+          attr<StackAttr::BackwardStep>(0),
           attr<StackAttr::ForwardStep>(0))) {
     mImpl = std::make_shared<StackOpImpl>(*this);
 }
@@ -132,8 +143,14 @@ std::set<std::string> StackOp::getAvailableBackends() const {
 }
 
 void StackOp::forward() {
-    Operator::forward();
+    OperatorTensor::forward();
     ++forwardStep();
+    backwardStep() = forwardStep();
+}
+
+void StackOp::backward() {
+    OperatorTensor::backward();
+    --backwardStep();
 }
 
 std::shared_ptr<Node> Stack(std::uint32_t maxElements,
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index b550db16dfee8286242df7cfbed9b3b300ee96d5..f9d612353a5fe8764419d6ac2f7fe1702f2a5df8 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -35,7 +35,7 @@ const std::string Aidge::Transpose_Op::Type = "Transpose";
 Aidge::Transpose_Op::Transpose_Op(const std::vector<Aidge::DimSize_t> &outputDimsOrder)
     : OperatorTensor(Type, {InputCategory::Data}, 1),
         mAttributes(std::make_shared<Attributes_>(
-        attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
+        attr<Attr::OutputDimsOrder>(outputDimsOrder)))
 {
     mImpl = std::make_shared<TransposeImpl>(*this);
 }
diff --git a/src/recipes/ExplicitTranspose.cpp b/src/recipes/ExplicitTranspose.cpp
index 7ff971b7e436219d5dfbb7cbadbaf780d3f1aeda..c4e2c425c93f6306373a49c29b1d117a03af04ae 100644
--- a/src/recipes/ExplicitTranspose.cpp
+++ b/src/recipes/ExplicitTranspose.cpp
@@ -94,14 +94,14 @@ void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {
                         else {
                             // Case 2: change of format
                             // => compute the new permutation array
-                            const auto transpose = getDataFormatTranspose(parentInput->dataFormat(), output->dataFormat());
+                            const auto transpose = getPermutationMapping(parentInput->dataFormat(), output->dataFormat());
                             auto transposeOp = std::static_pointer_cast<Transpose_Op>(parent.first->getOperator());
                             transposeOp->setDataFormat(output->dataFormat());
                             transposeOp->outputDimsOrder() = std::vector<DimSize_t>(transpose.begin(), transpose.end());
                         }
                     }
                     else {
-                        const auto transpose = getDataFormatTranspose(input->dataFormat(), output->dataFormat());
+                        const auto transpose = getPermutationMapping(input->dataFormat(), output->dataFormat());
                         auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
                         transposeOp->getOperator()->setDataFormat(output->dataFormat());
                         transposeOp->getOperator()->setDataType(output->dataType());
diff --git a/src/recipes/FoldConstantOfShape.cpp b/src/recipes/FoldConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..631c751920a41b79442d820097bd9a3f0e4e3b35
--- /dev/null
+++ b/src/recipes/FoldConstantOfShape.cpp
@@ -0,0 +1,112 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include "aidge/recipes/Recipes.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>   // std::int64_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+
+#include "aidge/data/DataType.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/ConstantOfShape.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+static bool foldIndividualConstantOfShape(const SinglePassGraphMatching::MatchingResult match, std::size_t namingId) {
+    const std::shared_ptr<Node> prod_node = match.graph->rootNode();
+    const std::shared_ptr<Producer_Op> prod_op =
+        std::static_pointer_cast<Producer_Op>(prod_node->getOperator());
+    prod_op->forward(); // is this REALLY needed if Producer is constant?
+    const std::shared_ptr<Tensor> shape = prod_op->getOutput(0);
+
+    if (!prod_op->constant()) {
+        Log::debug("{} - Producer is not constant. Skipping match.", __func__);
+        return false;
+    }
+    if (shape->nbDims() != 1) {
+        Log::debug("[{}] - ConstantOfShape 'shape' input Tensor has {} != 1 dimensions. Skipping match.",
+                    __func__, shape->nbDims());
+        return false;
+    }
+
+    if (shape->dataType() != DataType::Int64) {
+        Log::debug(
+            "Producer output data type is {} != {} required for ConstantOfShape "
+                "'shape' input. "
+                "Skipping match.",
+            prod_op->getOutput(0)->dataType(),
+            DataType::Int64
+        );
+        return false;
+    }
+
+    const std::shared_ptr<Node> constantofshape_node =
+        prod_node->getOrderedChildren().at(0).at(0);
+    const std::shared_ptr<ConstantOfShape_Op> constantofshape_op =
+        std::static_pointer_cast<ConstantOfShape_Op>(
+            constantofshape_node->getOperator());
+
+    std::shared_ptr<GraphView> graph_to_replace = std::make_shared<GraphView>();
+    graph_to_replace->add({constantofshape_node, prod_node});
+
+    constantofshape_op->forwardDims(true); // ConstantOfShape forwardDims is data dependent
+
+    std::string original_backend = constantofshape_op->backend().empty() ? "cpu" : constantofshape_op->backend();
+    graph_to_replace->setBackend("cpu"); // set backend to 'cpu' since speed is not the main focus here
+
+    constantofshape_op->forward();
+
+    const std::shared_ptr<Tensor> newInputTensor = constantofshape_op->getOutput(0);
+    newInputTensor->setBackend(original_backend); // set back original backend
+    const std::shared_ptr<Node> newProducer =
+        Producer(newInputTensor,
+            "constantOfShape_" + constantofshape_node->name() + "_folded_" + std::to_string(namingId),
+            true // remains constant
+        );
+
+    std::shared_ptr<GraphView> new_graph = std::make_shared<GraphView>();
+    new_graph->add(newProducer);
+
+    return GraphView::replace(graph_to_replace, new_graph);
+}
+
+std::size_t foldConstantOfShape(std::shared_ptr<GraphView> view) {
+    // this query guarantees that any Producer part of a returned match is ONLY followed by a ConstantOfShape
+    const auto matches = SinglePassGraphMatching(view).match("Producer->ConstantOfShape");
+
+    std::size_t nbReplaced = 0;
+    if (!Registrar<ConstantOfShape_Op>::exists("cpu")) {
+        Log::error("'cpu' backend not loaded. Impossible to run and fold any ConstantOfShape Operator.");
+    } else {
+        for (const auto &match : matches) {
+            if (!foldIndividualConstantOfShape(match, nbReplaced)) {
+                // TODO: this warning should be handled by GraphView::replace
+                Log::warn("Could not replace match with Producer");
+            } else {
+                ++nbReplaced;
+            }
+        }
+    }
+
+    Log::info("Removed [\033[1m\033[3m{}/{}\033[0m] ConstantOfShape Nodes",
+                nbReplaced, matches.size());
+    return nbReplaced;
+}
+
+} // namespace Aidge
diff --git a/src/recipes/removeConstantOfShape.cpp b/src/recipes/removeConstantOfShape.cpp
deleted file mode 100644
index e743050c2c0f13513f639a0690943e0d934f947d..0000000000000000000000000000000000000000
--- a/src/recipes/removeConstantOfShape.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-#include "aidge/recipes/Recipes.hpp"
-
-#include <algorithm>
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <cstdio>
-#include <functional>
-#include <memory>
-#include <numeric>
-#include <set>
-#include <stdexcept>
-#include <string>
-
-#include "aidge/data/Data.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/filler/Filler.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/graph/Matching.hpp"
-#include "aidge/graph/Node.hpp"
-#include "aidge/operator/ConstantOfShape.hpp"
-#include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/utils/ErrorHandling.hpp"
-#include "aidge/utils/Types.h"
-
-namespace Aidge {
-
-size_t removeConstantOfShape(std::shared_ptr<GraphView> graph_view) {
-  const auto matches =
-      SinglePassGraphMatching(graph_view).match("Producer->ConstantOfShape");
-
-  size_t nbReplaced = 0;
-  for (const auto &match : matches) {
-    const auto prod_node = match.graph->rootNode();
-    const auto prod_op =
-        std::static_pointer_cast<Producer_Op>(prod_node->getOperator());
-
-    const NodePtr constantofshape_node =
-        prod_node->getOrderedChildren().at(0).at(0);
-
-    const auto constantofshape_op =
-        std::static_pointer_cast<ConstantOfShape_Op>(
-            constantofshape_node->getOperator());
-
-    if (prod_op->getOutput(0)->nbDims() != 1) {
-      Log::debug("{} : Producer output dimension number is {} != 1 and {} "
-                 "input has to have 1 dim, skipping match.",
-                 __func__, prod_op->getOutput(0)->nbDims(),
-                 ConstantOfShape_Op::Type);
-      continue;
-    }
-    if (!prod_op->constant()) {
-      Log::debug("{} : Producer is not constant, skipping match.", __func__);
-      continue;
-    }
-    if (prod_op->getOutput(0)->dataType() != DataType::Int64) {
-      AIDGE_THROW_OR_ABORT(
-          std::runtime_error,
-          "{} : Producer output dtype is {} != int64 and {} "
-          "input type is restricted to int64_t, this is an error."
-          "Fix your network. skipping match.",
-          __func__, prod_op->getOutput(0)->dataType(),
-          ConstantOfShape_Op::Type);
-      continue;
-    }
-
-    auto graph_to_replace = std::make_shared<GraphView>();
-    auto new_graph = std::make_shared<GraphView>();
-    graph_to_replace->add(constantofshape_node);
-    if (prod_node->getChildren().size() == 1) {
-      graph_to_replace->add(prod_node);
-    } else {
-      Log::debug("{} : Producer node has multiple children, only"
-                 "replacing the {} node.",
-                 __func__, ConstantOfShape_Op::Type);
-    }
-
-    prod_node->forward();
-    std::shared_ptr<Tensor> prod_output = prod_op->getOutput(0);
-    std::vector<DimSize_t> new_input_dims;
-    new_input_dims.reserve(prod_output->dims()[0]);
-    for (DimSize_t i = 0; i < prod_output->size(); ++i) {
-      new_input_dims.push_back(prod_output->get<int64_t>(i));
-    }
-
-    auto new_input = std::make_shared<Tensor>(new_input_dims);
-    new_input->setBackend(prod_op->backend() == "" ? "cpu"
-                                                   : prod_op->backend());
-    new_input->setDataType(constantofshape_op->value().dataType());
-    for (std::size_t i = 0; i < new_input->size(); ++i) {
-      new_input->getImpl()->copy(
-          constantofshape_op->value().getImpl()->rawPtr(), 1, i);
-    }
-    auto new_prod =
-        Producer(new_input, prod_node->name() + "_constant_of_shape", true);
-    new_graph->add(new_prod);
-
-    const auto success = GraphView::replace(graph_to_replace, new_graph);
-    if (!success) {
-      Log::warn("Could not replace Producer({})->ConstantOfShape({}) with"
-                "Producer",
-                prod_node->name(), constantofshape_node->name());
-    } else {
-      ++nbReplaced;
-    }
-  }
-
-  Log::info("Replaced {} (out of {}) matching Producer->ConstantOfShape with "
-            "Producers",
-            nbReplaced, matches.size());
-  return nbReplaced;
-}
-} // namespace Aidge
-
diff --git a/src/scheduler/MemoryManager.cpp b/src/scheduler/MemoryManager.cpp
index 05f461b82f16b6af4ed412b7336aa2328bcafbe1..8e35913f4832f0e54e26f9be286943eb25f498ba 100644
--- a/src/scheduler/MemoryManager.cpp
+++ b/src/scheduler/MemoryManager.cpp
@@ -572,7 +572,7 @@ Aidge::MemoryManager::getPlanes(const std::shared_ptr<Node>& node) const
     const std::map<std::shared_ptr<Node>, std::vector<MemoryPlane> >
         ::const_iterator it = mMemPlanes.find(node);
 
-    if (it == mMemPlanes.end()) {
+    if (it == mMemPlanes.cend()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error,
             "getSize(): no memory allocated for node name {}", node->name());
     }
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 155a5e7e4689b2e0d645a4288b8a460c0687c395..fdda95727a7fbfcdadebc908c601ace0bf9c8684 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -34,6 +34,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/Concat.hpp"
+#include "aidge/utils/FileManagement.hpp"
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
@@ -728,7 +729,8 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemory(bool incProducers, bool wr
                             node->name(), node->type());
 
                         const bool isWrappable = (requiredProtected.data < requiredData.data);
-                        const MemoryManager::MemoryPlane& memPlane = memManager.getPlanes(parent.first).end()[-parent.first->nbOutputs()+parent.second];
+                        const auto& memPlanes = memManager.getPlanes(parent.first);
+                        const MemoryManager::MemoryPlane& memPlane = memPlanes.at(memPlanes.size() - parent.first->nbOutputs() + parent.second); // use at() to avoid dangling reference pointer
 
                         if (isWrappable || !memManager.isWrapAround(
                                     memPlane.memSpace,
@@ -810,7 +812,7 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemoryAutoConcat(bool incProducer
 
             std::shared_ptr<Node> concat = nullptr;
             // If the only child is a concatenation, check if we can allocate
-            // the concatenation result directly and skip allocation for this 
+            // the concatenation result directly and skip allocation for this
             // node output:
             if (childs.size() == 1 && (*childs.begin())->type() == Concat_Op::Type) {
                 concat = *childs.begin();
@@ -896,10 +898,11 @@ Aidge::MemoryManager Aidge::Scheduler::generateMemoryAutoConcat(bool incProducer
                                 node->name(), node->type());
 
                             const bool isWrappable = (requiredProtected.data < requiredData.data);
+                            const auto& memPlanes = memManager.getPlanes(parent.first);
                             const MemoryManager::MemoryPlane& memPlane
                                 = (concat && itConcat != concatMemPlane.end())
                                     ? itConcat->second
-                                    : memManager.getPlanes(parent.first).end()[-parent.first->nbOutputs()+parent.second];
+                                    : memPlanes.at(memPlanes.size()-parent.first->nbOutputs()+parent.second); // use at() to avoid dangling reference pointer
 
                             if (isWrappable || !memManager.isWrapAround(
                                         memPlane.memSpace,
@@ -1039,7 +1042,7 @@ void Aidge::Scheduler::connectInputs(const std::vector<std::shared_ptr<Aidge::Te
 }
 
 void Aidge::Scheduler::saveSchedulingDiagram(const std::string& fileName, bool ignoreProducers) const {
-    auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((fileName + ".mmd").c_str(), "w"), &std::fclose);
+    auto fp = createFile(fileName + ".mmd", "w");
 
     if (!fp) {
         AIDGE_THROW_OR_ABORT(std::runtime_error,
@@ -1072,7 +1075,7 @@ void Aidge::Scheduler::saveSchedulingDiagram(const std::string& fileName, bool i
     fmt::print(fp.get(), "\n");
 }
 
-void Aidge::Scheduler::saveStaticSchedulingDiagram(const std::string& fileName, bool ignoreProducers) const {
+void Aidge::Scheduler::saveStaticSchedulingDiagram(const std::string& fileName) const {
     auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((fileName + ".mmd").c_str(), "w"), &std::fclose);
 
     if (!fp) {
@@ -1112,7 +1115,7 @@ void Aidge::Scheduler::saveStaticSchedulingDiagram(const std::string& fileName,
 }
 
 void Aidge::Scheduler::saveFactorizedStaticSchedulingDiagram(const std::string& fileName, size_t minRepeat) const {
-    auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((fileName + ".mmd").c_str(), "w"), &std::fclose);
+    auto fp = createFile(fileName + ".mmd", "w");
 
     if (!fp) {
         AIDGE_THROW_OR_ABORT(std::runtime_error,
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
index de33ddd5a7613cde16b96b23722f6d2ab412f373..9b32054c35b14702850820abf9930ce811719dc4 100644
--- a/unit_tests/operator/Test_Conv_Op.cpp
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -24,13 +24,13 @@
 namespace Aidge {
 TEST_CASE("[core/operator] Conv_Op(ForwardDims) ", "[Operator][ForwardDims][Conv]") {
     SECTION("I:NCHW O:NCHW W:NCHW"){
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,3,224,450})); 
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,3,224,450}));
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4})); // Out_ch, In_ch_h,W,H
 
         const std::vector<std::size_t> expectedOutputDims({16,4,222,447});
         auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
 
-        //Set DataFormat 
+        //Set DataFormat
         conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
         input->setDataFormat(Aidge::DataFormat::NCHW);
         weight->setDataFormat(Aidge::DataFormat::NCHW);
@@ -43,61 +43,61 @@ TEST_CASE("[core/operator] Conv_Op(ForwardDims) ", "[Operator][ForwardDims][Conv
         REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
     }
     SECTION("I:NCHW O:NCHW W:NHWC") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450})); 
-        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // Out_ch, H, W, In_ch
-    
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450}));
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 3})); // Out_ch, H, W, In_ch
+
         const std::vector<std::size_t> expectedOutputDims({16, 4, 222, 447});
         auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
-    
-        // Set DataFormat 
+
+        // Set DataFormat
         conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
         input->setDataFormat(Aidge::DataFormat::NCHW);
         weight->setDataFormat(Aidge::DataFormat::NHWC); // NHWC weight format
-    
+
         // Set inputs
         conv1.setInput(1, weight);
         conv1.setInput(0, input);
-    
+
         REQUIRE(conv1.forwardDims());
         REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
     }
-    
+
     SECTION("I:NHWC O:NHWC W:NCHW") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450})); 
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 224, 450, 3}));
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // Out_ch, In_ch, H, W
-    
+
         const std::vector<std::size_t> expectedOutputDims({16, 222, 447, 4});
         auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
-    
-        // Set DataFormat 
+
+        // Set DataFormat
         conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
         input->setDataFormat(Aidge::DataFormat::NHWC);
         weight->setDataFormat(Aidge::DataFormat::NCHW); // NCHW weight format
-    
+
         // Set inputs
         conv1.setInput(1, weight);
         conv1.setInput(0, input);
-    
+
         REQUIRE(conv1.forwardDims());
         REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
     }
-    
+
     SECTION("I:NHWC O:NHWC W:NHWC") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3,224, 450})); 
-        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // (Out_ch, H, W, In_ch)
-    
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 224, 450, 3}));
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 4, 3})); // (Out_ch, H, W, In_ch)
+
         const std::vector<std::size_t> expectedOutputDims({16, 222, 447, 4});
         auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
-    
-        // Set DataFormat 
+
+        // Set DataFormat
         conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
         input->setDataFormat(Aidge::DataFormat::NHWC);
         weight->setDataFormat(Aidge::DataFormat::NHWC);
-    
+
         // Set inputs
         conv1.setInput(1, weight);
         conv1.setInput(0, input);
-    
+
         REQUIRE(conv1.forwardDims());
         REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
     }
diff --git a/unit_tests/operator/Test_StackImpl.cpp b/unit_tests/operator/Test_StackImpl.cpp
index ccdf5787d666f030b8856704eb0e4fb108089075..fe9ac0519c740bdf5d6be96f2dde187425c043a1 100644
--- a/unit_tests/operator/Test_StackImpl.cpp
+++ b/unit_tests/operator/Test_StackImpl.cpp
@@ -166,4 +166,62 @@ TEST_CASE("[core/operator] Stack(forward)", "[Stack]") {
         }
     }
 }
+
+TEST_CASE("[core/operator] Stack(backward)", "[Stack][Backward]") {
+    SECTION("Stack backward with fixed values") {
+        std::shared_ptr<Tensor> stack1 =
+            std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
+        std::shared_ptr<Tensor> stack2 =
+            std::make_shared<Tensor>(Array1D<int, 3>{{4, 5, 6}});
+
+        auto stack = Stack(2, "stack");
+        std::shared_ptr<StackOp> op =
+            std::static_pointer_cast<StackOp>(stack->getOperator());
+
+        //op->associateInput(0, stack1);
+        op->associateInput(0, stack1);
+        op->setBackend("cpu");
+        op->setDataType(DataType::Int32);
+        op->forwardDims();
+
+        // Simulate forward pass
+        op->forward();
+        op->forward();
+
+        auto newGrad = std::make_shared<Tensor>(
+            Tensor(Array2D<int, 2, 3>({{{1, 2, 3}, {4, 5, 6}}})));
+        op->getOutput(0)->setGrad(newGrad);
+
+        REQUIRE_NOTHROW(op->backward());
+        REQUIRE(*op->getInput(0)->grad() == *stack2);
+
+        REQUIRE_NOTHROW(op->backward());
+        REQUIRE(*op->getInput(0)->grad() == *stack1);
+    }
+
+    SECTION("Edge cases") {
+        std::shared_ptr<Tensor> stack1 =
+            std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
+        std::shared_ptr<Tensor> stack2 =
+            std::make_shared<Tensor>(Array1D<int, 3>{{4, 5, 6}});
+
+        auto stack = Stack(2, "stack");
+        std::shared_ptr<StackOp> op =
+            std::static_pointer_cast<StackOp>(stack->getOperator());
+
+        op->associateInput(0, stack1);
+        op->setBackend("cpu");
+        op->setDataType(DataType::Int32);
+
+
+        // Need to run forward before
+        REQUIRE_THROWS(op->backward());
+
+        op->forward();
+        op->backward();
+        REQUIRE(*op->getInput(0)->grad() == Tensor(Array1D<int, 3>({{0,0,0}})));
+    }
+}
+
+
 } // namespace Aidge
diff --git a/unit_tests/recipes/Test_removeConstantOfShape.cpp b/unit_tests/recipes/Test_FoldConstantOfShape.cpp
similarity index 56%
rename from unit_tests/recipes/Test_removeConstantOfShape.cpp
rename to unit_tests/recipes/Test_FoldConstantOfShape.cpp
index b912efc640fc901f694afeda256be91d51010419..02bace3c44d208044936766ec3fb1d30334ffdf3 100644
--- a/unit_tests/recipes/Test_removeConstantOfShape.cpp
+++ b/unit_tests/recipes/Test_FoldConstantOfShape.cpp
@@ -13,38 +13,38 @@
 #include "aidge/operator/Identity.hpp"
 #include "aidge/recipes/Recipes.hpp"
 
-#include <cstddef>
-#include <cstdint>
+#include <cstdint>  // std::int64_t
 #include <memory>
-#include <vector>
 
 #include <catch2/catch_test_macros.hpp>
 
 #include "aidge/graph/OpArgs.hpp"
-#include "aidge/operator/Add.hpp"
 #include "aidge/operator/ConstantOfShape.hpp"
 #include "aidge/operator/Conv.hpp"
-#include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
+#include "aidge/recipes/Recipes.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/Types.h"
 
-using namespace Aidge;
+namespace Aidge {
 
-TEST_CASE("[cpu/recipes] removeConstantOfShape",
-          "[ConstantOfShape][removeConstantOfShape][recipes]") {
-  auto input_T = std::make_shared<Tensor>(Array1D<int64_t, 4>({1, 1, 3, 3}));
+TEST_CASE("[cpu/recipes] foldConstantOfShape",
+          "[ConstantOfShape][foldConstantOfShape][recipes]") {
+  auto input_T = std::make_shared<Tensor>(Array1D<std::int64_t, 4>({1, 1, 3, 3}));
 
   auto model = std::make_shared<GraphView>();
   SECTION("Sequential model") {
-    model = Sequential({Producer(input_T, "prod_0", true),
-                        ConstantOfShape(3, "constantOfShape_0"),
-                        Conv(1, 1, {3, 3}, "Conv_0"), ReLU("ReLU_1")});
-    model->save("test_removeConstantOfShape_model_before_1");
-    CHECK(removeConstantOfShape(model) == 1);
-    CHECK(model->forwardDims());
-    model->save("test_removeConstantOfShape_model_after_1");
+    model = Sequential({
+        Producer(input_T, "prod_0", true),
+        ConstantOfShape(3, "constantOfShape_0"),
+        Conv(1, 1, {3, 3}, "Conv_0"),
+        ReLU("ReLU_1")
+    });
+    model->save("test_foldConstantOfShape_model_before_1");
+    // aidge_backend_cpu not loaded. Recipe should not work
+    REQUIRE(foldConstantOfShape(model) == 0);
   }
 }
 
+}  // namespace Aidge