diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py
index bbeb0f1df20c7023c1097817a7682c38d03a93b9..93f90e0e599b81ef8b587234666426297c6589a1 100644
--- a/aidge_core/aidge_export_aidge/operator_export/producer.py
+++ b/aidge_core/aidge_export_aidge/operator_export/producer.py
@@ -2,7 +2,7 @@ from aidge_core.aidge_export_aidge.utils import operator_register
 from aidge_core.export_utils.data_conversion import aidge2c
 
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import DataType, ExportNode, generate_file, generate_str
+from aidge_core import dtype, ExportNode, generate_file, generate_str
 import numpy as np
 from pathlib import Path
 
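Note (illustrative, not part of the patch): the hunk above only renames the Python enum import from DataType to dtype; the export helper keeps mapping it to a C type string through aidge2c. A minimal sketch of the new spelling, assuming aidge_core is built from this revision:

    import aidge_core
    from aidge_core.export_utils.data_conversion import aidge2c

    # enum members are now lowercase: dtype.float32, dtype.int32, ...
    print(aidge2c(aidge_core.dtype.float32))  # C type string used in the generated headers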
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
index 88b976a47fdeaad587e9ec50c27a085e88de23c5..48d07e8db8d5fb116148e9d41100fffa01fcf622 100644
--- a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
@@ -14,6 +14,4 @@
 #define _{{name|upper}}_DILATION_{{i}} {{DilationDims[i]}}
 {%- endfor %}
 
-#define _{{name|upper}}_NO_BIAS {{NoBias|int}}
-
 #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
index 96eabf09d47a9b72eeac8e95ce8f5eb8e6d30243..e292f9b611978877c47b15e91f926f30d27a1cc5 100644
--- a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
+++ b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
@@ -4,6 +4,4 @@
 #define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
 #define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
 
-#define _{{name|upper}}_NO_BIAS {{NoBias|int}}
-
 #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
index 72a12c6015e3962eaecc0301f54bf228c16fb29c..a805f8065e87244bf0546ca42d294b86f144a26d 100644
--- a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
@@ -19,8 +19,7 @@ std::shared_ptr<Aidge::Node> {{name}} =
             {%- for i in range(DilationDims|length) -%}
                 _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
-            },
-            _{{name|upper}}_NO_BIAS
+            }
         );
 {% include "./_set_input.jinja" %}
 graph->add({{name}});
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
index 80d978543fd15d4cce3b4329a9bd3481fb88afaa..df6dbc83492174fc49348b8073deb47a5deca313 100644
--- a/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
@@ -4,7 +4,6 @@ std::shared_ptr<Aidge::Node> {{name}} =
         Aidge::FC(
             _{{name|upper}}_IN_CHANNELS,
             _{{name|upper}}_OUT_CHANNELS,
-            _{{name|upper}}_NO_BIAS,
             "{{name}}"
         );
 {% include "./_set_input.jinja" %}
diff --git a/aidge_core/export_utils/node_export.py b/aidge_core/export_utils/node_export.py
index 7262e9a837424158b8896f305894dcc57769520c..80c37dd0a54d57561ce1a872ea540461aeec30a0 100644
--- a/aidge_core/export_utils/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -20,10 +20,7 @@ class ExportNode(ABC):
         self.node = aidge_node
         self.operator = aidge_node.get_operator()
         self.name = self.node.name()
-        self.attributes = {} # Attributes are auto fetched from aidge operators
-        if isinstance(self.operator, aidge_core.Attributes):
-            for attr_name in self.operator.get_attrs_name():
-                self.attributes[attr_name] = self.operator.get_attr(attr_name)
+        self.attributes = self.operator.attr.dict() if self.operator.attr is not None else {} # Attributes are auto fetched from aidge operators
 
         # rename is_leaf ?
         self.is_last = len(self.node.get_children()) == 0
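Note (illustrative, not part of the patch): ExportNode now fetches all attributes in one call through the operator's attr holder instead of iterating get_attrs_name()/get_attr(). A minimal sketch of the equivalent lookup, assuming aidge_core is built from this revision:

    import aidge_core

    op = aidge_core.LeakyReLU(0.1).get_operator()
    # what ExportNode.__init__ now stores in self.attributes
    attrs = op.attr.dict() if op.attr is not None else {}
    print(attrs)  # e.g. {'negative_slope': 0.1}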
diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 6e0c1f9b9a0828e266ef3bf19ee75df3e275b282..26d60f2fbaf0f3903baf191cf0a2ad5550fb3275 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -41,6 +41,7 @@ class test_OperatorImpl(unittest.TestCase):
         generic_matmul_op = matmul.get_operator()
         generic_matmul_op.set_forward_dims(lambda x: x)
         generic_matmul_op.set_impl(testImpl(generic_matmul_op))
+        generic_matmul_op.set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
         generic_matmul_op.forward()
         self.assertEqual(GLOBAL_CPT, 1)
 
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 164aee726255e0478b629ee853d9a1f619945f3a..5b25eb7975d439816dbf91cc95b462f217fd0227 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -30,42 +30,39 @@ class test_operator_binding(unittest.TestCase):
         self.assertNotEqual(gop.name(), "")
 
     def test_param_bool(self):
-        self.generic_operator.add_attr("bool", True)
-        self.assertEqual(self.generic_operator.has_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr_type("bool"), "bool")
-        self.assertEqual(self.generic_operator.get_attrs_name(), {"bool"})
-        self.generic_operator.del_attr("bool")
-        self.assertEqual(self.generic_operator.has_attr("bool"), False)
-        self.assertEqual(len(self.generic_operator.get_attrs_name()), 0)
+        self.generic_operator.attr.add_attr("bool", True)
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), True)
+        self.assertEqual(self.generic_operator.attr.get_attr("bool"), True)
+        self.generic_operator.attr.del_attr("bool")
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), False)
 
     def test_param_int(self):
-        self.generic_operator.add_attr("int", 1)
-        self.assertEqual(self.generic_operator.get_attr("int"), 1)
+        self.generic_operator.attr.add_attr("int", 1)
+        self.assertEqual(self.generic_operator.attr.get_attr("int"), 1)
 
     def test_param_float(self):
-        self.generic_operator.add_attr("float", 2.0)
-        self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
+        self.generic_operator.attr.add_attr("float", 2.0)
+        self.assertEqual(self.generic_operator.attr.get_attr("float"), 2.0)
 
     def test_param_str(self):
-        self.generic_operator.add_attr("str", "value")
-        self.assertEqual(self.generic_operator.get_attr("str"), "value")
+        self.generic_operator.attr.add_attr("str", "value")
+        self.assertEqual(self.generic_operator.attr.get_attr("str"), "value")
 
     def test_param_l_int(self):
-        self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
-        self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.generic_operator.attr.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
 
     def test_param_l_bool(self):
-        self.generic_operator.add_attr("l_bool", [True, False, False, True])
-        self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
+        self.generic_operator.attr.add_attr("l_bool", [True, False, False, True])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_bool"), [True, False, False, True])
 
     def test_param_l_float(self):
-        self.generic_operator.add_attr("l_float", [2.0, 1.0])
-        self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
+        self.generic_operator.attr.add_attr("l_float", [2.0, 1.0])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_float"), [2.0, 1.0])
 
     def test_param_l_str(self):
-        self.generic_operator.add_attr("l_str", ["ok"])
-        self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
+        self.generic_operator.attr.add_attr("l_str", ["ok"])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_str"), ["ok"])
 
     def test_dynamicattribute_binding(self):
         # Check original C++ attributes are binded
@@ -76,20 +73,20 @@ class test_operator_binding(unittest.TestCase):
         self.assertEqual(attrs.get_attr("b"), "test")
         self.assertEqual(attrs.has_attr("c"), True)
         self.assertEqual(attrs.get_attr("c"), [True, False, True])
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c"})
         self.assertEqual(attrs.has_attr("d"), False)
 
         # Add Python attributes
         attrs.add_attr("d", 18.56)
         self.assertEqual(attrs.get_attr("d"), 18.56)
         self.assertEqual(attrs.has_attr("d"), True)
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c", "d"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d"})
         self.assertEqual(attrs.has_attr("e"), False)
 
         # Check that added Python attribute is accessible in C++
         # Return the value of an attribute named "d" of type float64 (double in C++)
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 18.56)
-        attrs.set_attr("d", 23.89)
+        attrs.d = 23.89
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
     def test_forward_dims(self):
@@ -129,18 +126,18 @@ class test_operator_binding(unittest.TestCase):
         myVar = 2
         myBool = True
         # Test dynamic attribute set
-        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
-        gop.myBool = myBool
+        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", my_var=myVar).get_operator()
+        gop.attr.my_bool = myBool
         # Test variable set by kwargs
-        self.assertEqual(gop.myVar, myVar)
+        self.assertEqual(gop.attr.my_var, myVar)
         # Test set attr
-        self.assertEqual(gop.myBool, myBool)
+        self.assertEqual(gop.attr.my_bool, myBool)
 
         # Test static attribute set !
         prod = aidge_core.Producer([1]).get_operator()
-        self.assertEqual(prod.Constant, False)
-        prod.Constant = True # By default Constant is False
-        self.assertEqual(prod.Constant, True)
+        self.assertEqual(prod.attr.constant, False)
+        prod.attr.constant = True # By default Constant is False
+        self.assertEqual(prod.attr.constant, True)
 
 
 
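Note (illustrative, not part of the patch): the tests above reflect the move of the attribute API from the operator itself to its attr holder, with snake_case spellings for kwargs and static attributes. A minimal usage sketch, assuming aidge_core is built from this revision:

    import aidge_core

    gop = aidge_core.GenericOperator("Foo", 1, 0, 1, "foo0", my_var=2).get_operator()
    gop.attr.add_attr("threshold", 0.5)           # dynamic attribute added at runtime
    print(gop.attr.my_var, gop.attr.get_attr("threshold"))

    prod = aidge_core.Producer([1]).get_operator()
    prod.attr.constant = True                     # static attribute, now snake_case
    print(prod.attr.constant)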
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index e7b16963f4c26e5d014ce90fa289c043e2eb0be4..7c3bc0f6f68506c02af1723b263455a9c72b1f3a 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -29,15 +29,13 @@ class test_attributes(unittest.TestCase):
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
         self.assertEqual(conv_op.in_channels(), in_channels)
         self.assertEqual(conv_op.out_channels(), out_channels)
-        self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
+        self.assertEqual(conv_op.attr.get_attr("kernel_dims"), k_dims)
 
     def test_fc(self):
         in_channels = 4
         out_channels = 8
-        nb_bias = True
-        fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
+        fc_op = aidge_core.FC(in_channels, out_channels).get_operator()
         self.assertEqual(fc_op.out_channels(), out_channels)
-        self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_producer_1D(self):
         dims = [5]
@@ -67,7 +65,7 @@ class test_attributes(unittest.TestCase):
     def test_leaky_relu(self):
         negative_slope = 0.25
         leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
-        self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
+        self.assertEqual(leakyrelu_op.attr.get_attr("negative_slope"), negative_slope)
 
 if __name__ == '__main__':
     unittest.main()
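Note (illustrative, not part of the patch): as the updated tests show, NoBias is no longer an operator attribute, so FC is built without a bias flag and Conv attributes are read through attr with snake_case names. A minimal sketch, assuming aidge_core is built from this revision:

    import aidge_core

    fc_op = aidge_core.FC(4, 8).get_operator()        # no NoBias attribute anymore
    print(fc_op.out_channels())                       # 8

    conv_op = aidge_core.Conv2D(3, 16, [3, 3]).get_operator()
    print(conv_op.attr.get_attr("kernel_dims"))       # [3, 3]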
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
index d479c98b20534daa804f6019b63d528883c2b568..6348ba8dd1a635ce0299760b6fd31dcef58716cf 100644
--- a/aidge_core/unit_tests/test_tensor.py
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -42,7 +42,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -62,7 +62,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int64)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -73,7 +73,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
+        self.assertEqual(t.dtype(), aidge_core.dtype.float32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
         for i,j in zip(t.dims(), np_array.shape):
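Note (illustrative, not part of the patch): the Python-side enum is now spelled aidge_core.dtype with lowercase members, as these tests exercise. A minimal sketch, assuming aidge_core is built from this revision:

    import numpy as np
    import aidge_core

    t = aidge_core.Tensor(np.arange(9).reshape(1, 1, 3, 3).astype(np.int32))
    assert t.dtype() == aidge_core.dtype.int32        # was aidge_core.DataType.Int32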
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 940440bad52e367fe04872a308c99e4c802fa242..651a5de69596ee867a97b06ba683f49b05a41303 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -59,6 +59,7 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Resize.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 8fe2263c0aa2a2a3e70dc458ababc406b6823e0d..eaadc7a7ca5fa85672619fb2d3b5b17590fd3778 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -79,7 +79,7 @@ constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
  * Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
  * @param src Source DataFormat
  * @param dst Destination DataFormat
- * @return DataFormatTranspose Permutation array to achieve a transposition 
+ * @return DataFormatTranspose Permutation array to achieve a transposition
  *         from src to dst DataFormat.
 */
 constexpr inline DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 5fef32c6c76ab0e8837fdd707138bf32caef517c..80f8408a01be5a9b1f485251af0b13b8069404c5 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -550,23 +550,16 @@ public:
 
     inline void print() const { fmt::print("{}\n", toString()); }
 
-    std::shared_ptr<Tensor> grad() {
-        return mGrad;
-    }
-    void setGrad(std::shared_ptr<Tensor> newGrad) {
-        mGrad = newGrad;
-    }
-
     /**
-     * @brief Associate the gradient with a Tensor instance and set its implementation
-     * if none was previously set.
+     * @brief Get the gradient Tensor. If it is not initialized, create the Tensor instance
+     * and set its implementation if none was previously set.
      * @note Dimensions for the Tensor instance are copied from the original current Tensor.
      * @note If a Tensor instance was already associated, only the implementation is created
      * with values set to 0.
      * @note If Tensor instance and implementation already existed for the gradient
      * nothing is done.
      */
-    void initGrad() {
+    std::shared_ptr<Tensor> grad() {
         if (!mGrad) {
             mGrad = std::make_shared<Tensor>(mDims);
         }
@@ -576,6 +569,11 @@ public:
             mGrad->setBackend(hasImpl() ? mImpl->backend() : "cpu");
             mGrad->zeros();
         }
+        return mGrad;
+    }
+
+    void setGrad(std::shared_ptr<Tensor> newGrad) {
+        mGrad = newGrad;
     }
 
     /**
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index b09064c36f65a1e00d99ce5e2ff559e31681b065..682634015376bf309a015046decfa40a36e2b177 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -20,9 +20,18 @@
 #include <utility>
 #include <vector>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
 namespace Aidge {
 enum class DataType;
 
@@ -218,7 +227,7 @@ public:
      * GraphView object's Nodes, by calling Node::forwardDims().
      * This function verifies the following conditions:
      * - Every node will forwardDims() regardless of if dims were previously forwarded or not;
-     * - forwadDims() calls are made in node dependencies order, because if dims have changed 
+     * - forwardDims() calls are made in node dependencies order, because if dims have changed
      *   at any point in the graph, it must be propagated correctly to all succeeding nodes;
      * - It handles cyclic dependencies correctly (currently only induced by the Memorize_Op).
      */
@@ -266,7 +275,7 @@ public:
      * @brief Get the Nodes pointed to by the GraphView object.
      * @return std::set<NodePtr>
      */
-    inline const std::set<NodePtr>& getNodes() const { return mNodes; }
+    inline const std::set<NodePtr>& getNodes() const noexcept { return mNodes; }
 
     /**
      * @brief Get the operator with the corresponding name if it is in the
@@ -460,8 +469,8 @@ public:
      * @return true replacement has been performed
      * @return false no replacement has been performed
      */
-    static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
     static bool replace(const std::set<NodePtr>& oldNodes, const std::set<NodePtr>& newNodes);
+    static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
 
     /**
      * @brief Clone the GraphView with shared Operators. It is a new GraphView, with cloned Nodes, but the new Nodes refer to the same Operators as the original ones.
@@ -509,6 +518,11 @@ public:
      */
     void updateInputsOutputs();
 
+#ifdef PYBIND
+    std::string repr() const {
+        return fmt::format("GraphView(name='{}', Nodes: {} (inputs: {}, outputs: {}))", name(), mNodes.size(), mInputNodes.size(), mOutputNodes.size());
+    }
+#endif
 private:
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 7e9cfe399a1e13f281c999fafcf7d823276b7670..f694a1234b6037a0ae75a89380af9747765e290c 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -19,10 +19,19 @@
 #include <vector>
 #include <utility>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
 #include "aidge/graph/Connector.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
 namespace Aidge {
 
 using NodePtr = std::shared_ptr<Node>;
@@ -184,9 +193,14 @@ public:
    */
   inline IOIndex_t getFirstFreeDataInput() const {
     IOIndex_t i = 0;
-    for (; (i < nbData()) && (input(i).second != gk_IODefaultIndex); ++i) {}
-    // assert((i<nbData()) && "No free data input for Node");
-    return (i < nbData()) ? i : gk_IODefaultIndex;
+    for (; i < nbInputs(); ++i) {
+      if ((inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData)
+        && input(i).second == gk_IODefaultIndex)
+      {
+        break;
+      }
+    }
+    return (i < nbInputs()) ? i : gk_IODefaultIndex;
   }
 
 
@@ -218,13 +232,12 @@ public:
   inline IOIndex_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
 
   /**
-   * @brief Number of input specifically for data.
+   * @brief Category of a specific input (Data or Param, optional or not).
    * Data inputs exclude inputs expecting parameters (weights or bias).
-   * @details [data, data, weight, bias] => 2
-   * @return IOIndex_t
+   * @return InputCategory
    */
-  inline IOIndex_t nbData() const noexcept {
-    return getOperator()->nbData();
+  inline InputCategory inputCategory(IOIndex_t idx) const {
+    return getOperator()->inputCategory(idx);
   }
 
   /**
@@ -419,6 +432,27 @@ public:
 
   std::set<NodePtr> getNodeDelta(int delta,std::set<Aidge::NodePtr> nodeSee);
 
+#ifdef PYBIND
+    std::string repr() const {
+        std::string nodeString{fmt::format("Node(name='{}', optype='{}'", name(), type())};
+        if (mParents.size() > 0) {
+            std::vector<std::int8_t> connectedParents(mParents.size(), 0);
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if (mParents[i])
+                    connectedParents[i] = std::int8_t(1);
+            }
+            nodeString = fmt::format("{}, parents: {}", nodeString, connectedParents);
+        }
+        if (mChildren.size() > 0) {
+            std::vector<std::vector<std::int8_t>> connectedChildren{};
+            for (std::size_t i = 0; i < nbOutputs(); ++i) {
+                connectedChildren.push_back(std::vector<std::int8_t>(mChildren[i].size(), std::int8_t(1)));
+            }
+            nodeString = fmt::format("{}, children: {}", nodeString, connectedChildren);
+        }
+        return fmt::format("{})", nodeString);
+    }
+#endif
 
 private:
   ///////////////////////////////////////////////////////
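Note (illustrative, not part of the patch): repr() added here and the analogous GraphView::repr() above give Node and GraphView readable Python representations, assuming the bindings wire them to __repr__ (that wiring is not in these hunks). A rough sketch of the expected output:

    import aidge_core

    node = aidge_core.GenericOperator("Foo", 1, 0, 1, "foo0")
    graph = aidge_core.GraphView()
    graph.add(node)
    print(node)   # e.g. Node(name='foo0', optype='Foo')
    print(graph)  # e.g. GraphView(name='', Nodes: 1 (inputs: 1, outputs: 1))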
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 4ac14bdaecd16e90586d14699f3b6f1bd6d88cab..0e709afe9f175443a28947be7f4c3f5b01f5e362 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -29,7 +29,7 @@ public:
     static const std::string Type;
 
     Add_Op(const IOIndex_t nbIn)
-        : OperatorTensor(Type, nbIn, 0, 1)
+        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
     {
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 9a9fced142ebc345c095c1eeca6b9a6c4270cf36..06ee4327e2f2d4df32c2decd73841bdf5f79a739 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -28,27 +28,31 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
-                public StaticAttributes<AvgPoolingAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>> {
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)> {
 
 public:
     static const std::string Type;
 
-    AvgPooling_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
     template <AvgPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    AvgPooling_Op() = delete;
+
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+                        attr<AvgPoolingAttr::StrideDims>(stride_dims),
+                        attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -76,6 +80,10 @@ public:
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
@@ -101,8 +109,6 @@ inline std::shared_ptr<Node> AvgPooling(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
     return AvgPooling(to_array(kernel_dims), name, stride_dims);
 }
-
-
 }  // namespace Aidge
 
 extern template class Aidge::AvgPooling_Op<1>;
@@ -112,8 +118,10 @@ extern template class Aidge::AvgPooling_Op<4>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
-                                                          "KernelDims"};
+const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
+    "StrideDims",
+    "KernelDims"
+};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index aa53f8c43f0be2a0e094946d66fd263bc19e39f5..b5b64eb428d709e804dd9f6711530b348e0be747 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -28,21 +28,31 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
-                public StaticAttributes<BatchNormAttr, float, float> {
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    BatchNorm_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
     template <BatchNormAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    BatchNorm_Op() = delete;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
-        : OperatorTensor(Type, 1, 4, 1),
-          Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
-                           attr<BatchNormAttr::Momentum>(momentum)) {}
+        : OperatorTensor(Type,
+                            {InputCategory::Data,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param},
+                            1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<BatchNormAttr::Epsilon>(epsilon),
+            attr<BatchNormAttr::Momentum>(momentum))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -72,6 +82,10 @@ public:
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
+    inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
     }
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 6efbc0a214dde3ca969226f734b5ee903fe5ab50..6911053932afff6675be4eb2c713d8d3cd34b462 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -19,8 +19,8 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -30,21 +30,31 @@ public:
     void forward() override;
 };
 
+enum class CastAttr { TargetType };
+
 class Cast_Op : public OperatorTensor,
     public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
 public:
     static const std::string Type;
 
-    Cast_Op() : OperatorTensor(Type, 1, 0, 1) {
-        mImpl = std::make_shared<Cast_OpImpl>(*this);
-    }
+private:
+    using Attributes_ = StaticAttributes<CastAttr, DataType>;
+    template <CastAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Cast_Op() = delete;
+
+    Cast_Op(const DataType targetType);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Cast_Op(const Cast_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Cast_Op, *this, op.backend());
@@ -64,6 +74,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -72,9 +85,15 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Cast(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Cast_Op>(), name);
+
+inline std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
 }
+} // namespace Aidge
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::CastAttr>::data[] = { "TargetType" };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
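Note (illustrative, not part of the patch): Cast now takes its target data type at construction and stores it as the TargetType attribute. A rough sketch, assuming the Python binding mirrors the new C++ factory Cast(targetType, name) and a snake_case attribute name (neither is shown in this hunk):

    import aidge_core

    cast = aidge_core.Cast(aidge_core.dtype.float32, "cast0")   # hypothetical binding call
    print(cast.get_operator().attr.target_type)                 # expected: dtype.float32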
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index a9a4c9253f3af9f9cd82390256ec70d066017cc5..8341a93fe66d260ae3687170629b8759d0305a9c 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -28,25 +28,32 @@
 namespace Aidge {
 class Concat_OpImpl : public OperatorImpl {
 public:
-    Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Concat_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
     void forward() override;
 };
 
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
-    public StaticAttributes<ConcatAttr, DimSize_t> {
+    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<ConcatAttr, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>;
     template <ConcatAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
-        : OperatorTensor(Type, nbIn, 0, 1),
-          Attributes_(attr<ConcatAttr::Axis>(axis))
+public:
+    Concat_Op() = delete;
+
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
+        : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConcatAttr::Axis>(axis)))
     {
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
@@ -60,7 +67,7 @@ public:
      */
     Concat_Op(const Concat_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Concat_Op, *this, op.backend());
@@ -82,6 +89,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
     }
@@ -90,7 +100,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const DimIdx_t axis = 0, const std::string& name = "") {
+inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
 }
 }
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c30282f3438889e233f3d9ed22ab7c7e795b2951..87ff5854b310ca472994bd6b68fd6ae58d31e806 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -30,41 +30,36 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public StaticAttributes<ConvAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        bool> {
+                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)> {
 
 public:
     static const std::string Type;
 
-    Conv_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        bool>;
+                                        std::array<DimSize_t, DIM>>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Conv_Op() = delete;
 
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                      bool noBias = false)
-        : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(attr<ConvAttr::StrideDims>(strideDims),
-                      attr<ConvAttr::DilationDims>(dilationDims),
-                    //   attr<ConvAttr::InChannels>(inChannels),
-                    //   attr<ConvAttr::OutChannels>(outChannels),
-                      attr<ConvAttr::KernelDims>(kernelDims),
-                      attr<ConvAttr::NoBias>(noBias)) {}
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvAttr::StrideDims>(strideDims),
+            attr<ConvAttr::DilationDims>(dilationDims),
+            attr<ConvAttr::KernelDims>(kernelDims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -117,6 +112,12 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
+
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -148,10 +149,11 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
-
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return conv;
 }
 
@@ -170,6 +172,7 @@ inline std::shared_ptr<Node> Conv(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;
 
 namespace {
@@ -177,8 +180,7 @@ template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "StrideDims",
     "DilationDims",
-    "KernelDims",
-    "NoBias"
+    "KernelDims"
 };
 }
 
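Note (illustrative, not part of the patch): with the NoBias attribute removed, the bias is simply an OptionalParam input, and the Conv factory only attaches a bias Producer when noBias is false. A rough sketch from Python, assuming the Conv2D binding keeps a no_bias keyword (spelling assumed, not shown here):

    import aidge_core

    conv = aidge_core.Conv2D(3, 16, [3, 3])                    # weight and bias Producers attached
    conv_nb = aidge_core.Conv2D(3, 16, [3, 3], no_bias=True)   # no bias Producer; input 2 left unconnected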
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7091421720aaf4291198823a6d7dcd732a8d9f99..c8a83ff7de62a61e8125eac29d61c3938115cd09 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -29,38 +29,36 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
-                public StaticAttributes<ConvDepthWiseAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    ConvDepthWise_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>,
-                                             bool>;
+                                             std::array<DimSize_t, DIM>>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    ConvDepthWise_Op() = delete;
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                               bool no_bias=false)
-        : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
-                      attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
-                      attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
+                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
+            attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -93,6 +91,11 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -110,9 +113,11 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
+    if (!noBias) {
+        addProducer(convDW, 2, {nbChannels}, "b");
+    }
     return convDW;
 }
 
@@ -130,12 +135,13 @@ inline std::shared_ptr<Node> ConvDepthWise(
 }
 }  // namespace Aidge
 
+extern template class Aidge::ConvDepthWise_Op<1>;
 extern template class Aidge::ConvDepthWise_Op<2>;
 
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
-                                                          "KernelDims", "NoBias"};
+                                                          "KernelDims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 566f4a6ae69b090b3a035b034406d463eeb77317..3edb4a28851cffe060886a4660d6b524eb9b814a 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -30,7 +30,7 @@ class Div_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Div_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 5ec10522e889bb1188b2304940fd892c0928b414..f615fedeef6fea59d2177cf886e8d910f064f5c2 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -29,7 +29,7 @@ class Erf_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Erf_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Erf_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 9f10970c4fd5b21a1cb92b334167d353f066e05b..01da37a05414c5994ace767770e7c26fc8cd4646 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -24,24 +24,15 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class FCAttr { NoBias };
-
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const FC_Op &)>,
-              public StaticAttributes<FCAttr, bool> {
+                                 std::shared_ptr<OperatorImpl>(const FC_Op &)> {
 public:
     static const std::string Type;
 
-    FC_Op() = delete;
-
-    using Attributes_ = StaticAttributes<FCAttr, bool>;
-    template <FCAttr e> using attr = typename Attributes_::template attr<e>;
-
-    FC_Op(bool noBias)
-    : OperatorTensor(Type, 1, 2, 1),
-      Attributes_(attr<FCAttr::NoBias>(noBias))
+    FC_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
     {}
 
     /**
@@ -49,12 +40,11 @@ public:
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
+        : OperatorTensor(op)
     {
-        if (op.mImpl){
+        if (op.mImpl) {
             SET_IMPL_MACRO(FC_Op, *this, op.backend());
-        }else{
+        } else {
             mImpl = nullptr;
         }
     }
@@ -90,16 +80,13 @@ public:
 
 inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), name);
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
     addProducer(fc, 1, {outChannels, inChannels}, "w");
-    addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
+    if (!noBias) {
+        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
+    }
     return fc;
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::FCAttr>::data[] = {"NoBias"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index a04e4be69c9fd1a6ed7753ed512c7f5e45b925d9..3e9b780732fa9144f2e58bef854d1b42d063d0bf 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CORE_OPERATOR_GATHER_H_
 #define AIDGE_CORE_OPERATOR_GATHER_H_
 
-#include <cstdint>  // std::int64_t
+#include <cstdint>  // std::int8_t, std::int64_t
 #include <memory>
 #include <string>
 #include <vector>
@@ -36,21 +36,31 @@ enum class GatherAttr { Axis, Indices, GatheredShape };
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
-                public StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>> {
-
+                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)> {
 public:
     static const std::string Type;
 
+    using Attributes_ = StaticAttributes<GatherAttr,
+                                            std::int8_t,
+                                            std::vector<int64_t>,
+                                            std::vector<DimSize_t>>;
+private:
+    template <GatherAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
     Gather_Op() = delete;
 
-    using Attributes_ = StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>>;
-    template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
-    Gather_Op(std::int8_t axis, const std::vector<int64_t>& indices, const std::vector<DimSize_t>& gatheredShape)
-            : OperatorTensor(Type, 2, 0, 1),
-            Attributes_(attr<GatherAttr::Axis>(axis),
-                        attr<GatherAttr::Indices>(indices),
-                        attr<GatherAttr::GatheredShape>(gatheredShape))
+    Gather_Op(std::int8_t axis,
+              const std::vector<int64_t>& indices,
+              const std::vector<DimSize_t>& gatheredShape)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+            attr<GatherAttr::Axis>(axis),
+            attr<GatherAttr::Indices>(indices),
+            attr<GatherAttr::GatheredShape>(gatheredShape)))
     {
         mImpl = std::make_shared<Gather_OpImpl>(*this);
     }
@@ -61,7 +71,7 @@ public:
      */
     Gather_Op(const Gather_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Gather_Op, *this, op.backend());
@@ -84,6 +94,11 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int8_t& axis() const { return mAttributes -> getAttr<GatherAttr::Axis>(); }
+    inline std::vector<int64_t>& indices() const { return mAttributes -> getAttr<GatherAttr::Indices>(); }
+    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes -> getAttr<GatherAttr::GatheredShape>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "indices"};
     }
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 4ac9b4c1c40803309815f0ef1fb05c9e5a28e957..8196c4268e669001d99f25ed2cead546e1141aa7 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,16 +26,28 @@
 namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
-      public DynamicAttributes {
+      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
     ComputeDimsFunc mForwardDims;
 
+    const std::shared_ptr<DynamicAttributes> mAttributes;
+
 public:
+    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut)
+        : OperatorTensor(type, inputsCategory, nbOut)
+    {
+        mImpl = std::make_shared<OperatorImpl>(*this);
+    }
+
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
-        : OperatorTensor(type, nbData, nbParam, nbOut)
+        : OperatorTensor(type, [nbData, nbParam]() {
+                                std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
+                                inputsCategory.resize(nbData + nbParam, InputCategory::Param);
+                                return inputsCategory;
+                            }(), nbOut),
+          mAttributes(std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
@@ -45,7 +57,8 @@ public:
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
     }
@@ -64,6 +77,22 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    template <class T>
+    inline T& getAttr(const std::string& name)
+    { return mAttributes -> template getAttr<T>(name); }
+    template <class T>
+    inline const T& getAttr(const std::string& name) const
+    { return mAttributes -> template getAttr<T>(name); }
+
+    ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template <class T>
+    inline void addAttr(const std::string& name, const T& value) const
+    { mAttributes -> template addAttr<T>(name, value); }
 
     // Helper functions that can be used with setForwardDims():
     static const ComputeDimsFunc Identity;
@@ -73,6 +102,20 @@ public:
     }
 };
 
+/**
+ * @brief Fictive custom operator not associated with any implementation.
+ * Allows to import unknown operators and simulate new ones.
+ * @param type Type of the fictive operator.
+ * @param inputCategory List of inputs with their categories
+ * @param nbOut Number of output data.
+ * @param name (optional) name of the Operator.
+ * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ */
+inline std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
+                                             const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
+}
+
 /**
  * @brief Fictive custom operator not associated with any implementation.
  * Allows to import unknown operators and simulate new ones.
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 74529a0ba9481bf6280df8d3ce496f67635a5aef..8bb738e8b57598e4256d3850fc791976e73c834c 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -37,7 +37,7 @@ class GlobalAveragePooling_Op
 public:
   static const std::string Type;
 
-  GlobalAveragePooling_Op() : OperatorTensor(Type, 1, 0, 1) {}
+  GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
   GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op)
       : OperatorTensor(op) {
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index bcbe1c6c69e0a666d7a976558d558f101c5b8fca..393798da2fc26b3ef3f5e4cfe54f69fd82174a5f 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -42,7 +42,7 @@ public:
     static const std::string Type;
 
     Identity_Op()
-        : OperatorTensor(Type, 1, 0, 1)
+        : OperatorTensor(Type, {InputCategory::Data}, 1)
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 83a7c30fce7e0f68576f367d4b0bfe48edf4b3b6..294e7ebb009ff184c9150d2aa18067a15deeba22 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -12,16 +12,16 @@
 #ifndef AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 #define AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 
-#include <vector>
 #include <memory>
+#include <vector>
 
-#include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -30,20 +30,24 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
-    public StaticAttributes<LeakyReLUAttr, float> {
+    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)> {
 public:
     static const std::string Type;
 
-    LeakyReLU_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
     template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    LeakyReLU_Op() = delete;
 
     LeakyReLU_Op(float negativeSlope)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(
-            attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(
+            std::make_shared<Attributes_>(
+                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
     {}
 
     /**
@@ -52,7 +56,7 @@ public:
      */
     LeakyReLU_Op(const LeakyReLU_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
@@ -76,6 +80,9 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
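Note: with the attributes moved behind mAttributes, they are reached through attributes() or the dedicated accessor instead of getAttr<> on the operator itself; a short sketch based on the hunks above:

    auto lrelu = std::make_shared<Aidge::LeakyReLU_Op>(0.01f);
    lrelu->negativeSlope() = 0.2f;    // reference into the shared Attributes_ storage
    auto attrs = lrelu->attributes(); // type-erased view, e.g. for bindings or printing
    // Copies made through the copy-constructor share mAttributes, so they see the new slope.
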
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
index 09353e8a696c275e6a5dbadcfd8a254f69ad7f97..d4010471c9af853556dbe1d60c8585d12f8fc638 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Ln.hpp
@@ -30,7 +30,7 @@ class Ln_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Ln_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 580d720e617e5b20c0acc7ce5e7f200fe5b25606..be460ee88bd79592e29581f6acd64813ecc39bec 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -30,7 +30,7 @@ class MatMul_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    MatMul_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    MatMul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 8aff1582604a9e23e248e7c01521567483c793ad..082aa26bbdf1d55dcae29d1ffb2b9810db8b17d0 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -34,30 +34,31 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
-                public StaticAttributes<MaxPoolingAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    MaxPooling_Op() = delete;
-
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              bool>;
+
+private:
     template <MaxPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    MaxPooling_Op() = delete;
 
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             bool ceil_mode = false)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
-                      attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-                      attr<MaxPoolingAttr::CeilMode>(ceil_mode))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<MaxPoolingAttr::StrideDims>(stride_dims),
+            attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+            attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
         {}
 
     /**
@@ -66,7 +67,7 @@ public:
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
@@ -85,25 +86,22 @@ public:
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-        }
-        if (!(getInput(0)->empty())) {
+        if (inputsAssociated()) {
             std::array<DimSize_t, DIM + 2> outputDims{};
             const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             std::function<float(float)> roundingFunction;
-            if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
+            if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
                 roundingFunction = [](float x) { return std::ceil(x); };
             } else {
                 roundingFunction = [](float x) { return std::floor(x); };
             }
 
-            for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             roundingFunction(static_cast<float>(inputDims[dim+2] -
-                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
-                                            static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
+                                                                    mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
+                                            static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
@@ -119,6 +117,11 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<MaxPoolingAttr::KernelDims>(); }
+    inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
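Note: the same pattern applies here; a sketch of the new named accessors that replace this->template getAttr<MaxPoolingAttr::...>():

    auto pool = std::make_shared<Aidge::MaxPooling_Op<2>>(std::array<Aidge::DimSize_t, 2>{2, 2});
    pool->strideDims()[0] = 2;           // writes through to the shared attribute storage
    const bool ceil = pool->ceilMode();  // previously getAttr<MaxPoolingAttr::CeilMode>()
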
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 6b0ace2eb09fde069f8b9b104f92fc33811c25aa..d6af56f2faad18b9e39c793ea68e39eac4dd2f01 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -37,20 +37,25 @@ public:
 enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
-    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)>,
-    public StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int> {
+    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int>;
+private:
+    using Attributes_ = StaticAttributes<MemorizeAttr, std::uint32_t, std::uint32_t, std::uint32_t>;
     template <MemorizeAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Memorize_Op(const unsigned int endStep)
-        : OperatorTensor(Type, 1, 1, 2),
-          Attributes_(attr<MemorizeAttr::ScheduleStep>(0),
-                      attr<MemorizeAttr::ForwardStep>(0),
-                      attr<MemorizeAttr::EndStep>(endStep))
+public:
+    Memorize_Op() = delete;
+
+    Memorize_Op(const std::uint32_t endStep)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
+          mAttributes(std::make_shared<Attributes_>(
+                        attr<MemorizeAttr::ScheduleStep>(0),
+                        attr<MemorizeAttr::ForwardStep>(0),
+                        attr<MemorizeAttr::EndStep>(endStep)))
     {
         mOutputs[1] = mOutputs[0];
     }
@@ -62,7 +67,7 @@ public:
      */
     Memorize_Op(const Memorize_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
@@ -87,6 +92,11 @@ public:
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); }
+    inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "data_input_init"};
     }
@@ -95,7 +105,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Memorize(const unsigned int endStep, const std::string& name = "") {
+inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
 }
 }  // namespace Aidge
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index fb8c73af33dd081664c82427ea8aa6876117d695..744564b4bd591d84b871a6af71c4a54589103485 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -74,14 +74,7 @@ public:
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
     bool forwardDims(bool allowDataDependency = false) override final {
-        // Check first that all required inputs are available, otherwise
-        // mGraph->forwardDims() will fail!
-        bool forwarded = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
-        }
-
-        if (forwarded) {
+        if (inputsAssociated()) {
             // Forward dims of micro-graph
             return mGraph->forwardDims({}, allowDataDependency);
         }
@@ -115,6 +108,7 @@ public:
     Elts_t getNbProducedData(IOIndex_t outputIdx) const override;
 
     void updateConsummerProducer() override;
+    void resetConsummerProducer() override;
     void forward() override;
     void backward() override {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for a MetaOperator");
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index eb57761cc5927cb4eedfb6cb12b1d49a0ee50b9c..51681629cbae215fd529b6e7bb568d07264dd63e 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -44,11 +44,13 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    addProducer(metaOp, 2, {out_channels}, "b");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {out_channels}, "b");
+    }
     return metaOp;
 }
 
@@ -57,11 +59,10 @@ inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
 {
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
     return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
 }
@@ -94,11 +95,13 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
 {
     // Construct micro-graph
     auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
 
     auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
     addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
-    addProducer(metaOp, 2, {nb_channels}, "b");
+    if (!no_bias) {
+        addProducer(metaOp, 2, {nb_channels}, "b");
+    }
     return metaOp;
 }
 
@@ -107,11 +110,10 @@ inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
 {
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
     return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
 }
@@ -203,8 +205,7 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
                            bool noBias = false,
                            const std::string& name = "");
 
-std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length,
-                                         bool noBias = false);
+std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
 
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index e9bcaa871619828a50dcd407d39744e7983fe2c4..cf5a3f188424fc52849eab580cce624ff714c729 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -35,7 +35,7 @@ class Move_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Move_Op() : OperatorTensor(Type, 1, 0, 1) {
+    Move_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {
         mImpl = std::make_shared<Move_OpImpl>(*this);
     }
 
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index f53a38a82a6771e416435222137e72366f5f69f3..e61393b28fc45bf46487ac2277753dec1b297b81 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -32,7 +32,7 @@ class Mul_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Mul_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Mul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 124512517b8c6a274ff426034c15424c82bb0030..adec17d07f39727a0c75d32fa24bcc624aa66e1a 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -18,11 +18,21 @@
 #include <utility>
 #include <cstddef>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/hook/Hook.hpp"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
 namespace Aidge {
 
 enum class OperatorType {
@@ -30,6 +40,13 @@ enum class OperatorType {
     Tensor
 };
 
+enum class InputCategory {
+    Data,
+    Param,
+    OptionalData,
+    OptionalParam
+};
+
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
     std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
@@ -38,17 +55,15 @@ protected:
 private:
     std::string mType;
     const OperatorType mOperatorType;
-    const IOIndex_t mNbData;
-    const IOIndex_t mNbParam;
+    const std::vector<InputCategory> mInputsCategory;
     const IOIndex_t mNbOut;
 
 public:
     Operator() = delete;
-    Operator(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
+    Operator(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
     : mType(type),
       mOperatorType(operatorType),
-      mNbData(nbData),
-      mNbParam(nbParam),
+      mInputsCategory(inputsCategory),
       mNbOut(nbOut)
     {
         // ctor
@@ -57,8 +72,7 @@ public:
     Operator(const Operator& op):
         std::enable_shared_from_this<Operator>(),
         mOperatorType(op.mOperatorType),
-        mNbData(op.mNbData),
-        mNbParam(op.mNbParam),
+        mInputsCategory(op.mInputsCategory),
         mNbOut(op.mNbOut)
     {
         mType = op.mType;
@@ -73,12 +87,14 @@ public:
 public:
     virtual std::shared_ptr<Operator> clone() const = 0;
 
+    virtual std::shared_ptr<Attributes> attributes() const { return nullptr; };
     /**
      * @brief Set the specified input with a shallow copy.
      * @param inputIdx Index of the input to set.
      * @param data Data to copy.
      */
     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void resetInput(const IOIndex_t inputIdx) = 0;
 
     /**
      * @brief Set the specified input value by performing a deep copy of the given data.
@@ -179,11 +195,14 @@ public:
         return mOperatorType;
     }
 
+    inline InputCategory inputCategory(IOIndex_t idx) const {
+        AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
+        return mInputsCategory[idx];
+    }
+
     virtual inline bool isAtomic() const noexcept { return true; }
 
-    inline IOIndex_t nbInputs() const noexcept { return mNbData+mNbParam; };
-    inline IOIndex_t nbData() const noexcept { return mNbData; };
-    inline IOIndex_t nbParam() const noexcept { return mNbParam; };
+    inline IOIndex_t nbInputs() const noexcept { return mInputsCategory.size(); };
     inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
 
     static const std::vector<std::string> getInputsName() {
@@ -192,6 +211,17 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {};
     }
+
+#ifdef PYBIND
+    std::string repr() const {
+        return fmt::format("Operator(type = '{}', nb_in = {}, nb_out = {}, attr = {}, backend = {})",
+                    type(),
+                    nbInputs(),
+                    nbOutputs(),
+                    (attributes() ? attributes()->repr() : "None"),
+                    (mImpl ? "'"+backend()+"'" : "None"));
+    }
+#endif
 };
 } // namespace Aidge
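Note: with nbData()/nbParam() replaced by per-input categories, input checks can now be written against the category. A sketch, assuming Operator::getRawInput() from the existing API:

    bool mandatoryInputMissing(const Aidge::Operator& op) {
        for (Aidge::IOIndex_t i = 0; i < op.nbInputs(); ++i) {
            const auto cat = op.inputCategory(i);
            const bool optional = (cat == Aidge::InputCategory::OptionalData)
                               || (cat == Aidge::InputCategory::OptionalParam);
            if (!op.getRawInput(i) && !optional) {
                return true;  // a required Data/Param input has no data associated
            }
        }
        return false;
    }
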
 
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 1197adb9c525b3589c123ea1e9cd9f1f86a8d0b4..657a6d8ab6124b8919a3ac8fea5b6bfa6c4254b9 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -40,7 +40,7 @@ protected:
 public:
     OperatorTensor() = delete;
 
-    OperatorTensor(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam,
+    OperatorTensor(const std::string& type, const std::vector<InputCategory>& inputsCategory,
                    const IOIndex_t nbOut);
 
     OperatorTensor(const OperatorTensor& other);
@@ -51,6 +51,7 @@ public:
     ///////////////////////////////////////////////////
     virtual void associateInput(const IOIndex_t inputIdx,
                                 const std::shared_ptr<Data>& data) override;
+    void resetInput(const IOIndex_t inputIdx) override final;
     ///////////////////////////////////////////////////
 
     ///////////////////////////////////////////////////
@@ -84,8 +85,11 @@ public:
 
     virtual void setDataType(const DataType& dataType) const override;
     virtual void setDataFormat(const DataFormat& dataFormat) const override;
-    
+
     virtual void forward() override;
+
+protected:
+    bool inputsAssociated(bool checkNonEmpty = true) const;
 };
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index a4e4ebdce801971de118ca8a263999046a13777d..5fd0f93986206e6cd958a85055159783eeb8bc8f 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -13,16 +13,16 @@
 #define AIDGE_CORE_OPERATOR_PAD_H_
 
 #include <array>
-#include <numeric>
+#include <memory>
+#include <string>
 #include <vector>
-#include <cmath>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -31,30 +31,31 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
-                public StaticAttributes<PadAttr,
-                                       std::array<DimSize_t, 2*DIM>,
-                                       PadBorderType,
-                                       double> {
+                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    Pad_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<PadAttr,
-                                             std::array<DimSize_t, 2*DIM>,
-                                             PadBorderType,
-                                             double>;
+                                            std::array<DimSize_t, 2*DIM>,
+                                            PadBorderType,
+                                            double>;
     template <PadAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    Pad_Op() = delete;
 
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                      const PadBorderType &borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
-                           attr<PadAttr::BorderType>(borderType),
-                           attr<PadAttr::BorderValue>(borderValue)) {}
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<PadAttr::BeginEndBorders>(beginEndTuples),
+            attr<PadAttr::BorderType>(borderType),
+            attr<PadAttr::BorderValue>(borderValue))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -62,7 +63,7 @@ public:
      */
     Pad_Op(const Pad_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {}
 
     /**
@@ -75,28 +76,22 @@ public:
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        bool associated = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
+        if (inputsAssociated()) {
             std::array<DimSize_t, DIM + 2> outputDims{};
             const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
             for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+                outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
                                     + inputDims[dim+2]
-                                    + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+                                    + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
             }
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
+            return true;
         }
 
-        return associated;
+        return false;
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
@@ -104,6 +99,11 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
+    inline PadBorderType& borderType() const noexcept { return mAttributes->template getAttr<PadAttr::BorderType>(); }
+    inline double& borderValue() const noexcept { return mAttributes->template getAttr<PadAttr::BorderValue>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -112,9 +112,6 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string Pad_Op<DIM>::Type = "Pad";
-
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                                            const std::string& name = "",
@@ -137,6 +134,9 @@ inline std::shared_ptr<Node> Pad(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Pad_Op<1>;
+extern template class Aidge::Pad_Op<2>;
+
 namespace {
 template <>
 const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 2219f30ec9db7acf55491882a78e7a1ed2931cf0..575d56b455940ea98571110dbaa9a83de09fef37 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -34,18 +34,19 @@ public:
 enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
-    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)>,
-    public StaticAttributes<PopAttr, unsigned int> {
+    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<PopAttr, unsigned int>;
-    template <PopAttr e>
-    using attr = typename Attributes_::template attr<e>;
+private:
+    using Attributes_ = StaticAttributes<PopAttr, std::uint32_t>;
+    template <PopAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
+public:
     Pop_Op()
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<PopAttr::ForwardStep>(0))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
     {
         mImpl = std::make_shared<Pop_OpImpl>(*this);
     }
@@ -56,7 +57,7 @@ public:
      */
     Pop_Op(const Pop_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Pop_Op, *this, op.backend());
@@ -80,6 +81,9 @@ public:
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 08c4de2a254dd267eda4040b54108f93a0c2d922..ee5c01c2121d68a7988dc686c4dbb4bbf7331c84 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -29,7 +29,7 @@ class Pow_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Pow_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Pow_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index f547a45ab55e4f9a38c2f63c6e8af44430813668..9e3bdd1ba2f601da27dea3a6a01131a0c8191eb4 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -31,20 +31,24 @@ enum class ProdAttr { Constant };
 class Producer_Op
     : public OperatorTensor,
       public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>(
-                                          const Producer_Op &)>,
-      public StaticAttributes<ProdAttr, bool> {
+                                          const Producer_Op &)> {
 public:
     static const std::string Type;
 
+private:
     using Attributes_ = StaticAttributes<ProdAttr, bool>;
-    template <ProdAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <ProdAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Producer_Op() = delete;
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
                 bool constant = false)
-        : OperatorTensor(Type, 0, 0, 1),
-          Attributes_(attr<ProdAttr::Constant>(constant))
+        : OperatorTensor(Type, {}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ProdAttr::Constant>(constant)))
     {
         mOutputs[0]->resize(dims);
         mImpl = std::make_shared<OperatorImpl>(*this);
@@ -95,6 +99,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {};
     }
@@ -109,7 +116,7 @@ public:
     }
 
     void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
-        if (getAttr<ProdAttr::Constant>()) {
+        if (mAttributes->template getAttr<ProdAttr::Constant>()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
         }
         OperatorTensor::setOutput(outputIdx, data);
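Note: a sketch of the resulting Producer API, grounded in the hunks above:

    auto weights = std::make_shared<Aidge::Producer_Op>(std::array<Aidge::DimSize_t, 2>{16, 8},
                                                        /*constant=*/true);
    if (weights->constant()) {
        // setOutput() on this operator now aborts: a constant Producer cannot be overwritten.
    }
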
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 963de31c49f48784e92434b2b563d6c008e2d4fd..40b5d581d53521e6086d24c5ecc53f725dd9f252 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -30,7 +30,7 @@ class ReLU_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ReLU_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index ff8d8b0696aafdab48cd37d049fa0473078d7ea6..3fcf19ffd13645fb28b6efcfefaf8e347b148c89 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -29,22 +29,28 @@ namespace Aidge {
 enum class ReduceMeanAttr { Axes, KeepDims };
 
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>,
-                public StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t> {
+                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)> {
 
-   public:
+public:
     static const std::string Type;
 
-    ReduceMean_Op() = delete;
-
-    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ReduceMeanAttr,
+                                            std::vector<std::int32_t>,
+                                            DimSize_t>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ReduceMean_Op() = delete;
 
     ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<ReduceMeanAttr::Axes>(axes),
-                      attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReduceMeanAttr::Axes>(axes),
+            attr<ReduceMeanAttr::KeepDims>(keep_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -52,7 +58,7 @@ class ReduceMean_Op : public OperatorTensor,
      */
     ReduceMean_Op(const ReduceMean_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
@@ -73,6 +79,11 @@ class ReduceMean_Op : public OperatorTensor,
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); }
+    inline DimSize_t& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
+
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 12fbda88b0044f836b298e0cf818724f53f821a7..4ea0cca30089555ff7979f141f94e5c84f04ffa1 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -32,22 +32,26 @@ public:
 enum class ReshapeAttr { Shape, AllowZero };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>,
-                   public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>, bool> {
+                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)> {
 
 public:
     static const std::string Type;
 
-    Reshape_Op() = delete;
+private:
+    using Attributes_ = StaticAttributes<ReshapeAttr,
+                                            std::vector<std::int64_t>,
+                                            bool>;
+    template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>,  bool>;
-    template <ReshapeAttr e>
-    using attr = typename Attributes_::template attr<e>;
+public:
+    Reshape_Op() = delete;
 
     Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
-        : OperatorTensor(Type, 2, 0, 1),
-          Attributes_(attr<ReshapeAttr::Shape>(shape),
-                      attr<ReshapeAttr::AllowZero>(allowzero))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReshapeAttr::Shape>(shape),
+            attr<ReshapeAttr::AllowZero>(allowzero)))
     {
         mImpl = std::make_shared<Reshape_OpImpl>(*this);
     }
@@ -58,7 +62,7 @@ public:
      */
     Reshape_Op(const Reshape_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
@@ -81,6 +85,10 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); }
+    inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
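Note: input #1 (the shape tensor) is now OptionalData, so a Reshape node can be built from the attribute alone; a sketch:

    auto reshape = std::make_shared<Aidge::Reshape_Op>(std::vector<std::int64_t>{1, -1},
                                                       /*allowzero=*/false);
    // reshape->shape() returns {1, -1}; the shape input only needs to be connected when the
    // target shape should come from the graph instead of the attribute.
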
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..565affc57ae8e7b1838466733b0f5d8fa8e1a6d6
--- /dev/null
+++ b/include/aidge/operator/Resize.hpp
@@ -0,0 +1,88 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_RESIZE_H_
+#define AIDGE_CORE_OPERATOR_RESIZE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Resize_Op : public OperatorTensor,
+                  public Registrable<Resize_Op, std::string, std::shared_ptr<OperatorImpl>(const Resize_Op&)>{
+
+public:
+    static const std::string Type;
+
+    Resize_Op()
+        : OperatorTensor(Type,
+            {InputCategory::Data,
+                InputCategory::OptionalData,
+                InputCategory::OptionalData,
+                InputCategory::OptionalData},
+            1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+
+    Resize_Op(const Resize_Op& op)
+        : OperatorTensor(op)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+        }
+        else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Resize_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Resize_Op>(*this);
+    }
+
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+
+    static const std::vector<std::string> getInputsName(){
+        // roi, scales and sizes are optional inputs, even though they usually behave as constant parameters
+        return {"data_input", "roi", "scales", "sizes"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Resize(const std::string &name = "") {
+
+    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
+}
+
+}  // namespace Aidge
+
+
+#endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
\ No newline at end of file
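Note: only the data input of the new Resize operator is mandatory; roi, scales and sizes are declared OptionalData. A sketch, with Node::addChild() assumed from the existing API:

    auto resize = Aidge::Resize("resize0");
    // A Producer holding the scales could be wired to input #2, e.g.:
    //   scalesProducer->addChild(resize, 0, 2);
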
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index c864bd045d8a5a1fc5f4ee591d1d81fcaf241bac..7d8e11b31546cd87a8d6b2d36e2929c9ef6df7a2 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -12,39 +12,42 @@
 #ifndef AIDGE_CORE_OPERATOR_SCALING_H_
 #define AIDGE_CORE_OPERATOR_SCALING_H_
 
+#include <cstddef>  // std::size_t
 #include <vector>
 #include <memory>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 enum class ScalingAttr {
-    scalingFactor, quantizedNbBits, isOutputUnsigned
+    ScalingFactor, QuantizedNbBits, IsOutputUnsigned
 };
 
-class Scaling_Op 
+class Scaling_Op
     : public OperatorTensor,
-      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)>,
-      public StaticAttributes<ScalingAttr, float, size_t, bool> {
+      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)> {
 public:
     static const std::string Type;
 
-    Scaling_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ScalingAttr, float, std::size_t, bool>;
     template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Scaling_Op() = delete;
 
     Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(
-            attr<ScalingAttr::scalingFactor>(scalingFactor),
-            attr<ScalingAttr::quantizedNbBits>(nbBits),
-            attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ScalingAttr::ScalingFactor>(scalingFactor),
+            attr<ScalingAttr::QuantizedNbBits>(nbBits),
+            attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
     {}
 
     /**
@@ -53,7 +56,7 @@ public:
      */
     Scaling_Op(const Scaling_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
@@ -72,6 +75,11 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<ScalingAttr::ScalingFactor>(); }
+    inline std::size_t& quantizedNbBits() const noexcept { return mAttributes -> getAttr<ScalingAttr::QuantizedNbBits>(); }
+    inline bool& isOutputUnsigned() const noexcept { return mAttributes -> getAttr<ScalingAttr::IsOutputUnsigned>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
@@ -85,10 +93,10 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
 }
 */
-inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, 
-                                     std::size_t quantizedNbBits=8, 
-                                     bool isOutputUnsigned=true, 
-                                     const std::string& name = "") 
+inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
+                                     std::size_t quantizedNbBits=8,
+                                     bool isOutputUnsigned=true,
+                                     const std::string& name = "")
 {
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
 }
@@ -97,7 +105,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
 namespace {
 template <>
 const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-    = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
+    = {"ScalingFactor", "QuantizedNbBits", "IsOutputUnsigned"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 3132e4ab7adcc331772d627147cc31c25597570a..6d2d1b5e7c212fafa5ad6457d9e0a260e96b1c90 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -36,20 +36,24 @@ enum class ShapeAttr { Start, End };
 class Shape_Op : public OperatorTensor,
                 public Registrable<Shape_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)>,
-                public StaticAttributes<ShapeAttr, std::int64_t, std::int64_t> {
+                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)> {
 
 public:
     static const std::string Type;
 
-    Shape_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>;
     template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
-    Shape_Op(std::int64_t start, std::int64_t end)
-            : OperatorTensor(Type, 1, 0, 1),
-            Attributes_(attr<ShapeAttr::Start>(start),
-                        attr<ShapeAttr::End>(end))
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Shape_Op() = delete;
+
+    Shape_Op(const std::int64_t start, const std::int64_t end)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ShapeAttr::Start>(start),
+            attr<ShapeAttr::End>(end)))
     {
         mImpl = std::make_shared<Shape_OpImpl>(*this);
     }
@@ -60,7 +64,7 @@ public:
      */
     Shape_Op(const Shape_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Shape_Op, *this, op.backend());
@@ -82,6 +86,10 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int64_t& start() const noexcept { return mAttributes -> getAttr<ShapeAttr::Start>(); }
+    inline std::int64_t& end() const noexcept { return mAttributes -> getAttr<ShapeAttr::End>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -90,7 +98,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Shape(std::int64_t start = 0, std::int64_t end = -1, const std::string& name = "") {
+inline std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 6e27cd6532db0e17751f6c664d14665909fb14b4..879edcac6a7ed9a78a2db8d82994071a6cf09635 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -32,7 +32,7 @@ class ShiftGELU_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ShiftGELU_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index bb54d55ba609239e74b40f4ca96b646860e83a3a..f171130213b2e51ca8fc9905d93944198f849ce7 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -32,7 +32,7 @@ class ShiftMax_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    ShiftMax_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index bea9fc45eaa7f17f71963106b5bd3e1340a48a92..ae82d4a3a2d29755bba22b9a4194284310ac4f84 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -30,7 +30,7 @@ class Sigmoid_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Sigmoid_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index c8f16bb1ad769299a89d3f8a05e46960fe824711..7d425a0f3589e74b54ee0834fdc4291ea7f49bad 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -29,22 +29,29 @@ enum class SliceAttr { Starts, Ends, Axes, Steps };
 
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>,
-      public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>, std::vector<std::int64_t>> {
-
+      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)> {
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SliceAttr,
+                                            std::vector<std::int64_t>,
+                                            std::vector<std::int64_t>,
+                                            std::vector<std::int8_t>,
+                                            std::vector<std::int64_t>>;
+    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Slice_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>, std::vector<std::int64_t>>;
-    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
     Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes, const std::vector<std::int64_t>& steps)
-        : OperatorTensor(Type, 5, 0, 1),
-          Attributes_(attr<SliceAttr::Starts>(starts),
-                      attr<SliceAttr::Ends>(ends),
-                      attr<SliceAttr::Axes>(axes),
-                      attr<SliceAttr::Steps>(steps))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData, InputCategory::OptionalData}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<SliceAttr::Starts>(starts),
+            attr<SliceAttr::Ends>(ends),
+            attr<SliceAttr::Axes>(axes),
+            attr<SliceAttr::Steps>(steps)))
     {}
 
 
@@ -55,7 +62,7 @@ public:
      */
     Slice_Op(const Slice_Op &op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Slice_Op, *this, op.backend());
@@ -73,10 +80,16 @@ public:
     std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
 
     bool dimsForwarded() const override final;
-    bool forwardDims(bool allowDataDependency = false) override final;
+    bool forwardDims(bool allowDataDependency = true) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes->getAttr<SliceAttr::Starts>(); }
+    inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes->getAttr<SliceAttr::Ends>(); }
+    inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes->getAttr<SliceAttr::Axes>(); }
+    inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes->getAttr<SliceAttr::Steps>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "starts", "ends", "axes", "steps"};
     }
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 1868dc6e3df48401ef3f8a126b07572e2f45144d..70f3a561ae5c9ba4720de8419bcd5aaf32a51e47 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -24,24 +24,29 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class SoftmaxAttr { AxisIdx };
+enum class SoftmaxAttr { Axis };
 
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
-                public StaticAttributes<SoftmaxAttr, std::size_t> {
+                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)> {
 
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SoftmaxAttr, std::int32_t>;
+    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Softmax_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SoftmaxAttr, std::size_t>;
-    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
-    Softmax_Op(std::size_t axis)
-            :  OperatorTensor(Type, 1, 0, 1),
-            Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {}
+    Softmax_Op(std::int32_t axis)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+                attr<SoftmaxAttr::Axis>(axis)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -49,7 +54,7 @@ public:
      */
     Softmax_Op(const Softmax_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
@@ -68,6 +73,10 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    inline std::int32_t& axis() const noexcept { return mAttributes->getAttr<SoftmaxAttr::Axis>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -76,7 +85,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Softmax(std::size_t axis, const std::string& name = "") {
+inline std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
 }
 } // namespace Aidge
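
The SoftmaxAttr::AxisIdx attribute is renamed to Axis and the axis becomes a signed std::int32_t. A minimal Python sketch of the user-facing effect, assuming the Softmax factory binding and Node.get_operator() keep their usual signatures (neither is part of this hunk) and that attributes are reachable through the read-only attr property added to the Operator binding later in this patch:

    import aidge_core

    # The 'axis' keyword and the Softmax factory are assumed to mirror the C++ helper above.
    node = aidge_core.Softmax(axis=1, name="sm")
    op = node.get_operator()
    # The C++ attribute SoftmaxAttr::Axis is exposed in snake_case on the Python side.
    print(op.attr.axis)   # hypothetical accessor; expected to print 1
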
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index ff50a6aa7b8de971431515a09ca4e684dcc51865..42baf66e6722c6f9a0d3f40f12d4f4685fcc6980 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -45,7 +45,7 @@ public:
     using Attributes_ = StaticAttributes<SplitAttr,  std::int8_t, std::vector<DimSize_t>>;
     template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
     Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
-        : OperatorTensor(Type, 2, 0, nbOutputs),
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
           Attributes_(attr<SplitAttr::Axis>(axis),
                       attr<SplitAttr::Split>(split))
     {
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index f5ffa431192d73a703c1ce973cb485dadb31420d..05b20286bc3f576d4e43fbece26ae270b3e583e6 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -33,7 +33,7 @@ public:
 public:
     static const std::string Type;
 
-    Sqrt_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Sqrt_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index e5d8442851c35e9232fdd77d862fb48b71c76f1f..fc30e51c9a6daed56a2e0e665be645739961aa6b 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -33,7 +33,7 @@ public:
 public:
     static const std::string Type;
 
-    Sub_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index 3fd5377d30cfff864743dcab2da9e690e26e5263..b5f183a90aeeb4ef424c318e8942a818b568b44a 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -28,7 +28,7 @@ class Tanh_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Tanh_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index f1a7fe477fd77baf86d48c7c5bd67c6ea074a1bc..72096448ebf0e00d73e33bdab094ca7f0b7d0633 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -27,28 +27,33 @@
 namespace Aidge {
 class TransposeImpl : public OperatorImpl {
 public:
-    TransposeImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    TransposeImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
     void forward() override;
 };
 
 enum class TransposeAttr { OutputDimsOrder };
 
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)>,
-                public StaticAttributes<TransposeAttr, std::vector<DimSize_t>> {
+                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)> {
 
-   public:
+public:
     static const std::string Type;
 
-    Transpose_Op() = delete;
 
+private:
     using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
-    template <TransposeAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <TransposeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Transpose_Op() = delete;
 
     Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
     {
         mImpl = std::make_shared<TransposeImpl>(*this);
     }
@@ -59,7 +64,7 @@ class Transpose_Op : public OperatorTensor,
      */
     Transpose_Op(const Transpose_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
@@ -81,6 +86,9 @@ class Transpose_Op : public OperatorTensor,
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes->getAttr<TransposeAttr::OutputDimsOrder>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/recipes/GraphViewHelper.hpp b/include/aidge/recipes/GraphViewHelper.hpp
index a2c571bf4ed164729f7c3416c814b913b4d07e6f..3b8ba7627362c945a6bfbe587ec952fdda013e98 100644
--- a/include/aidge/recipes/GraphViewHelper.hpp
+++ b/include/aidge/recipes/GraphViewHelper.hpp
@@ -39,8 +39,6 @@ std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview
  */
 std::set<std::shared_ptr<Tensor>> parameters(std::shared_ptr<GraphView> graphview);
 
-void compile_gradient(std::shared_ptr<Aidge::GraphView> gv);
-
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ */
diff --git a/include/aidge/scheduler/SequentialScheduler.hpp b/include/aidge/scheduler/SequentialScheduler.hpp
index a7929fde8a2affdd562d70d11a7c809aaf3357d0..35dafead6dc424550df7d83d54f5ec998c3b4d86 100644
--- a/include/aidge/scheduler/SequentialScheduler.hpp
+++ b/include/aidge/scheduler/SequentialScheduler.hpp
@@ -54,7 +54,7 @@ public:
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void backward(bool instantiateGrad = true);
+    void backward();
 
 private:
     SchedulingPolicy mSchedulingPolicy;
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index 927686cfd5cca910c5ffb25364ae4bc971ad18bf..c1f6a8a7f704b4bd813983cb178d9e5acba5a5e1 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -12,15 +12,14 @@
 #ifndef AIDGE_CORE_UTILS_ATTRIBUTES_H_
 #define AIDGE_CORE_UTILS_ATTRIBUTES_H_
 
-#ifdef PYBIND
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#endif
-#include <vector>
 #include <string>
 #include <set>
 
 #ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <fmt/format.h>
+
 namespace py = pybind11;
 #endif
 
@@ -36,26 +35,53 @@ namespace Aidge {
 template<class T, std::size_t N>
 constexpr std::size_t size(T (&)[N]) { return N; }
 
+
 /* This abstract class allows to avoid binding Attributes.
 *  Otherwise we would need to bind every template possible of Attributes.
 *  Every operators can access the methods of this class by inheriting from
 *  Attributes in the binding code.
 */
 class Attributes {
+protected:
+    /**
+     * @brief Convert snake_case to PascalCase.
+     * @param snakeCase string to convert.
+    */
+    static std::string snakeToPascal(const std::string& snakeCase);
+
+
+    /**
+     * @brief Convert PascalCase to snake_case.
+     * @param pascalCase string to convert.
+    */
+    static std::string pascalToSnake(const std::string& pascalCase);
+
+    /**
+     * @brief Check whether a given string is in PascalCase.
+     * @param str String to check.
+     */
+    static bool isPascalCase(const std::string& str);
+
+    /**
+     * @brief Check whether a given string is in snake_case.
+     * @param str String to check.
+     */
+    static bool isSnakeCase(const std::string& str);
+
 public:
     /**
      * @brief Check if the attribute exists.
      * @param name Name of the attribute to check.
      * @return bool True if the attribute exists, false otherwise.
     */
-    virtual bool hasAttr(const std::string& name) const = 0;
+    virtual bool hasAttr(const std::string& /*name*/) const = 0;
 
     /**
      * @brief Get the (implementation defined) name of the type of an attribute, returned by std::type_info::name.
      * @param name Name of the attribute.
      * @return std::string Name of the type as returned by std::type_info::name.
     */
-    virtual std::string getAttrType(const std::string& name) const = 0;
+    virtual std::string getAttrType(const std::string& /*name*/) const = 0;
 
     /**
      * @brief Get the attribute's name list.
@@ -64,16 +90,25 @@ public:
     virtual std::set<std::string> getAttrsName() const = 0;
 
 #ifdef PYBIND
+    virtual bool hasAttrPy(const std::string& name) const = 0;
+
     /* Bindable get function, does not recquire any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from its return type.
     */
-    virtual py::object getAttrPy(const std::string& name) const = 0;
+    virtual py::object getAttrPy(const std::string& name) const  = 0;
     /* Bindable set function, does not recquire any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from ``value`` type.
     */
-    virtual void setAttrPy(const std::string& name, py::object&& value) = 0;
+    virtual void setAttrPy(const std::string& /*name*/, py::object&& /*value*/) = 0;
+
+    virtual std::string str() const = 0;
+
+    virtual std::string repr() const = 0;
+
+    virtual py::dict dict() const = 0;
+
 #endif
     virtual ~Attributes() {}
 };
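
These snakeToPascal/pascalToSnake/isPascalCase/isSnakeCase helpers back the naming convention enforced throughout the rest of the patch: PascalCase attribute names on the C++ side, snake_case on the Python side. Their C++ definitions are not part of this hunk; the standalone Python sketch below only illustrates the intended mapping:

    import re

    def pascal_to_snake(name: str) -> str:
        # "KernelDims" -> "kernel_dims"
        return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

    def snake_to_pascal(name: str) -> str:
        # "kernel_dims" -> "KernelDims"
        return "".join(part.capitalize() for part in name.split("_"))

    assert pascal_to_snake("KernelDims") == "kernel_dims"
    assert snake_to_pascal("no_bias") == "NoBias"
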
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 113377b33d9827c3428eeb0adc92111f75c22abb..c5054eb2fd2e8bfa5e7fca898f343ce630643dbd 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -48,11 +48,12 @@ public:
      */
     template<class T> T& getAttr(const std::string& name)
     {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
 #ifdef PYBIND
         // If attribute does not exist in C++, it might have been created or modified in Python
         auto it = mAttrs.find(name);
         if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
+            auto itPy = mAttrsPy.find(pascalToSnake(name));
             if (itPy != mAttrsPy.end()) {
                 // Insert the attribute back in C++
                 mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
@@ -65,11 +66,12 @@ public:
 
     template<class T> const T& getAttr(const std::string& name) const
     {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
 #ifdef PYBIND
         // If attribute does not exist in C++, it might have been created or modified in Python
         auto it = mAttrs.find(name);
         if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
+            auto itPy = mAttrsPy.find(pascalToSnake(name));
             if (itPy != mAttrsPy.end()) {
                 // Insert the attribute back in C++
                 mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
@@ -86,6 +88,7 @@ public:
     ///\param value Attribute value
     template<class T> void addAttr(const std::string& name, const T& value)
     {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
         const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
         AIDGE_ASSERT(res.second, "attribute already exists");
 
@@ -93,7 +96,7 @@ public:
         // We cannot handle Python object if the Python interpreter is not running
         if (Py_IsInitialized()) {
             // Keep a copy of the attribute in py::object that is updated everytime
-            mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+            mAttrsPy.emplace(std::make_pair(pascalToSnake(name), py::cast(value)));
         }
 #endif
     }
@@ -129,7 +132,8 @@ public:
 #ifdef PYBIND
     void addAttrPy(const std::string& name, py::object&& value)
     {
-        auto it = mAttrs.find(name);
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
+        auto it = mAttrs.find(snakeToPascal(name));
         AIDGE_ASSERT(it == mAttrs.end(), "attribute already exists");
 
         const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
@@ -138,26 +142,52 @@ public:
 
     void setAttrPy(const std::string& name, py::object&& value) override final
     {
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
         auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
         if (!resPy.second)
             resPy.first->second = std::move(value);
 
         // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
-        mAttrs.erase(name);
+        const std::string pascalName = snakeToPascal(name);
+        mAttrs.erase(pascalName);
+    }
+
+    py::dict dict() const override {
+        py::dict attributes;
+        for (const auto& elt : mAttrsPy) {
+            const std::string snakeName = pascalToSnake(elt.first);
+            attributes[snakeName.c_str()] = elt.second;
+        }
+        return attributes;
+    }
+
+    std::string str() const override {
+        return repr();
     }
+
+    std::string repr() const override {
+        // Build the string representation from the snake_case attribute dictionary
+        return fmt::format("AttrDict({})", static_cast<std::string>(py::str(dict())));
+        // return fmt::format("AttrDict({})",  dict().attr("__repr__")().cast<std::string>());
+    }
+
 #endif
 
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
     bool hasAttr(const std::string& name) const override final {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
+        return (mAttrs.find(name) != mAttrs.cend());
+    }
+
 #ifdef PYBIND
+    bool hasAttrPy(const std::string& name) const override final {
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
         // Attributes might have been created in Python, the second condition is necessary.
-        return (mAttrs.find(name) != mAttrs.end() || mAttrsPy.find(name) != mAttrsPy.end());
-#else
-        return (mAttrs.find(name) != mAttrs.end());
-#endif
+        return (mAttrs.find(snakeToPascal(name)) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
     }
+#endif
 
     std::string getAttrType(const std::string& name) const override final {
         // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
@@ -195,7 +225,7 @@ public:
      * generic type caster for std::any is not feasable.
      * The strategy here is to keep a copy of each attribute in py::object that is updated everytime.
     */
-    py::object getAttrPy(const std::string& name) const override final {
+    inline py::object getAttrPy(const std::string& name) const override final {
         return mAttrsPy.at(name);
     };
 #endif
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index a01f81629c8425f9d860bf1ea03bfe421dbd04fa..a400f8046d07df4ff4493470737f5c4d42945db7 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -14,6 +14,7 @@
 #define AIDGE_LOG_H_
 
 #include <memory>
+#include <vector>
 
 #include <fmt/format.h>
 #include <fmt/ranges.h>
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 6bf59155373cf73d158fce4eb5bda58f7d279e69..8fc88ff79c50751ba7b79662fc9fc430d4ed601d 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -12,11 +12,16 @@
 #ifndef AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
 #define AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
 
-#include <tuple>
+#include <array>
 #include <cassert>
 #include <cstddef>
+#include <string>
+#include <tuple>
 #include <typeinfo>
-#include <array>
+
+#ifdef PYBIND
+#include <fmt/format.h>
+#endif
 
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -149,8 +154,9 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type getAttrType(std::size_t i) const {
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value,
+                std::enable_if_t<(SIZE > 0), bool> = true>
+    constexpr const std::type_info& getAttrType(std::size_t i) const {
         if (i == SIZE-1) {
             return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
         }
@@ -159,8 +165,9 @@ public:
         }
     }
 
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    [[noreturn]] typename std::enable_if<(SIZE == 0), const std::type_info&>::type getAttrType(std::size_t /*i*/) const {
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value,
+                std::enable_if_t<(SIZE == 0), bool> = true>
+    [[noreturn]] const std::type_info& getAttrType(std::size_t /*i*/) const {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
@@ -173,6 +180,7 @@ public:
     //////////////////////////////////////
     // Runtime existance check with name
     bool hasAttr(const std::string& name) const override final {
+        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 return true;
@@ -182,6 +190,20 @@ public:
         return false;
     }
 
+#ifdef PYBIND
+    bool hasAttrPy(const std::string& name) const override final {
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
+        const std::string pascalName = snakeToPascal(name);
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+#endif
+
     // Runtime type access with name
     std::string getAttrType(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
@@ -212,28 +234,40 @@ public:
     static std::set<std::string> staticGetAttrsName() {
         std::set<std::string> attrsName;
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
+            attrsName.insert(pascalToSnake(std::string(EnumStrings<ATTRS_ENUM>::data[i])));
         }
         return attrsName;
     }
 
 
     py::object getAttrPy(const std::string& name) const override {
+        if (name == "__dict__") {
+            return py::none();
+        }
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
+        const std::string pascalName = snakeToPascal(name);
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
-                // Normal accessor would not work has we convert the tuple to a py::object which can be anything
+                // Normal accessor would not work as we convert the tuple to a py::object which can be anything
                 return py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
             }
         }
-
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
+        // if (name == "_ipython_canary_method_should_not_exist_") {
+            // fmt::print("dict call {}", py::str(dict().attr("__getitem__")(name)).cast<std::string>());
+        // }
+        // IPython probes special methods and attributes (e.g. "_ipython_canary_method_should_not_exist_"), which must raise an exception rather than abort
+        throw py::attribute_error(fmt::format("attribute \"{}\" not found.", name));
+        // AIDGE_THROW_OR_ABORT(py::key_error, "attribute \"{}\" not found in Python attribute getter", name);
+        // return py::none();
     }
 
 
     void setAttrPy(const std::string& name, py::object&& value) override final{
+        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
+        const std::string pascalName = snakeToPascal(name);
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // Cannot update attribute using reference has it would require templating
                 // Use a dirty
                 auto tmpAttr = py::cast(mAttrs);
@@ -242,8 +276,42 @@ public:
                 return;
             }
         }
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
+        throw py::attribute_error(fmt::format("attribute \"{}\" not found.", name));
+    }
+
+    py::dict dict() const override {
+        py::dict attributes;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            const std::string snakeName = pascalToSnake(EnumStrings<ATTRS_ENUM>::data[i]);
+                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+                // Normal accessor would not work as we convert the tuple to a py::object which can be anything
+            attributes[snakeName.c_str()] = py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+        }
+        return attributes;
     }
+
+    std::string str() const override {
+        return repr();
+    }
+
+    std::string repr() const override {
+        // Build the string representation from the snake_case attribute dictionary
+        return fmt::format("AttrDict({})", static_cast<std::string>(py::str(dict())));
+        // return fmt::format("AttrDict({})",  dict().attr("__repr__")().cast<std::string>());
+    }
+
+    std::size_t len() const {
+        return size(EnumStrings<ATTRS_ENUM>::data);
+    }
+    // AttrDict get_a() const {
+    //     py::dict attributes_;
+    //     for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+    //         const std::string snakeName = pascalToSnake(std::string(EnumStrings<ATTRS_ENUM>::data[i]));
+    //         attributes_[snakeName.c_str()] = py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+    //     }
+    //     return AttrDict(attributes_);
+    // }
+
     #endif
 
 private:
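
The new dict(), str() and repr() methods expose the static attributes to Python as a snake_case dictionary rendered as "AttrDict({...})". A hedged sketch of what this looks like from Python, assuming the Attributes base binding forwards __str__/__repr__ to these methods (that binding is not shown in this patch) and using the LeakyReLU binding changed further below:

    import aidge_core

    op = aidge_core.LeakyReLUOp(negative_slope=0.01)
    # Attribute names appear in snake_case on the Python side.
    print(op.attr)   # e.g. AttrDict({'negative_slope': 0.01})
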
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index 955b510e6cce6712e4738c0064836dbb733a3c3d..c6595360b17ee08eaa82d483987914adc67b60a8 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+
 #include "aidge/data/Data.hpp"
 
 namespace py = pybind11;
@@ -17,18 +18,18 @@ namespace Aidge {
 
 void init_Data(py::module& m){
     // TODO : extend with more values !
-    py::enum_<DataType>(m, "DataType")
-    .value("Float64", DataType::Float64)
-    .value("Float32", DataType::Float32)
-    .value("Float16", DataType::Float16)
-    .value("Int8", DataType::Int8)
-    .value("Int16", DataType::Int16)
-    .value("Int32", DataType::Int32)
-    .value("Int64", DataType::Int64)
-    .value("UInt8", DataType::UInt8)
-    .value("UInt16", DataType::UInt16)
-    .value("UInt32", DataType::UInt32)
-    .value("UInt64", DataType::UInt64)
+    py::enum_<DataType>(m, "dtype")
+    .value("float64", DataType::Float64)
+    .value("float32", DataType::Float32)
+    .value("float16", DataType::Float16)
+    .value("int8", DataType::Int8)
+    .value("int16", DataType::Int16)
+    .value("int32", DataType::Int32)
+    .value("int64", DataType::Int64)
+    .value("uint8", DataType::UInt8)
+    .value("uint16", DataType::UInt16)
+    .value("uint32", DataType::UInt32)
+    .value("uint64", DataType::UInt64)
     ;
 
     py::class_<Data, std::shared_ptr<Data>>(m,"Data");
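
The Python-facing enum is renamed from DataType to dtype, with lowercase value names. A quick check of the new spelling:

    import aidge_core

    print(aidge_core.dtype.float32)   # dtype.float32
    print(aidge_core.dtype.int8)      # dtype.int8
    # The previous spelling, aidge_core.DataType.Float32, no longer exists after this change.
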
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 3533f2b84623338d35403cb6d858db5914383b8d..83bb4afeacdd6de181fd6738edad2229736854c8 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -84,7 +84,6 @@ void init_Tensor(py::module& m){
     .def("grad", &Tensor::grad)
     .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
-    .def("init_grad", &Tensor::initGrad)
     .def("size", &Tensor::size)
     .def("capacity", &Tensor::capacity)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
@@ -93,10 +92,14 @@ void init_Tensor(py::module& m){
     .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("__str__", [](Tensor& b) {
-        return b.toString();
+        if (b.empty()) {
+            return std::string("{}");
+        } else {
+            return b.toString();
+        }
     })
     .def("__repr__", [](Tensor& b) {
-        return "Tensor(dtype = " + std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]) + ",\n" + b.toString() + ")";
+        return fmt::format("Tensor(dims = {}, dtype = {})", b.dims(), std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]));
     })
     .def("__len__", [](Tensor& b) -> size_t{
         return b.size();
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 4e74be8878eb3ca081fd2d5457e42768f4026be5..2930383817d1555d51b8bddd8eff6402240e905a 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -34,6 +34,8 @@ void init_GraphView(py::module& m) {
           .def("in_view", (bool (GraphView::*)(const std::string&) const) &GraphView::inView)
           .def("root_node", &GraphView::rootNode)
           .def("set_root_node", &GraphView::setRootNode, py::arg("node"))
+          .def("__repr__", &GraphView::repr)
+          .def("__len__", [](const GraphView& g){ return g.getNodes().size(); })
           .def("log_outputs", &GraphView::logOutputs, py::arg("path"))
           .def("get_ordered_inputs", &GraphView::getOrderedInputs)
           .def("get_ordered_outputs", &GraphView::getOrderedOutputs)
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index b22ebdd0f6cdb5bd738cd164b3fc2e9fe36d9987..06c171214d5df261e5df832179a0fa69420aab7d 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -48,6 +48,8 @@ void init_Node(py::module& m) {
     :rtype: str
     )mydelimiter")
 
+    .def("__repr__", &Node::repr)
+
     .def("add_child",
          (void (Node::*)(std::shared_ptr<Node>, const IOIndex_t, IOIndex_t)) &
                  Node::addChild,
@@ -132,11 +134,12 @@ void init_Node(py::module& m) {
     :rtype: int
     )mydelimiter")
 
-    .def("get_nb_data", &Node::nbData,
+    .def("input_category", &Node::inputCategory, py::arg("idx"),
     R"mydelimiter(
-    Number of data inputs.
+    Category of a specific input (Data or Param, either of which may be optional).
+    Data inputs are distinct from inputs expecting parameters (weights or bias).
 
-    :rtype: int
+    :rtype: InputCategory
     )mydelimiter")
 
     .def("get_nb_outputs", &Node::nbOutputs,
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 966def88033dee8cd6cee06d80dc32114050b430..0587554b722c99d009a248ce963f80cb4fd892ec 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -21,22 +21,31 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
+
   const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
+  const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName);
+//   py::class_<StaticAttributes<AvgPoolingAttr,
+//                                              std::array<DimSize_t, DIM>,
+//                                              std::array<DimSize_t, DIM>>,
+//     std::shared_ptr<StaticAttributes<AvgPoolingAttr,
+//                                              std::array<DimSize_t, DIM>,
+//                                              std::array<DimSize_t, DIM>>>, Attributes>(m, pyStaticAttrClassName.c_str());
+
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor>(
         m, pyClassName.c_str(),
         py::multiple_inheritance())
     .def(py::init<const std::array<DimSize_t, DIM> &,
                   const std::array<DimSize_t, DIM> &>(),
             py::arg("kernel_dims"),
-            py::arg("stride_dims"))
+            py::arg("stride_dims") = create_array<DimSize_t,DIM>(1))
     .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
-    .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
+    .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
 
@@ -54,6 +63,9 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
 
 void init_AvgPooling(py::module &m) {
+//   py::enum_<AvgPoolingAttr>(m, "_AvgPoolingAttr")
+    // .value("kernel_dims", AvgPoolingAttr::KernelDims)
+    // .value("stride_dims", AvgPoolingAttr::StrideDims);
   declare_AvgPoolingOp<1>(m);
   declare_AvgPoolingOp<2>(m);
   declare_AvgPoolingOp<3>(m);
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 4ec25e02a50330bdf764b598b598836a251d65ea..42e31de2c7c8ba440cd8e479cf9285b398970b42 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -9,9 +9,10 @@
  *
  ********************************************************************************/
 
-#include <pybind11/pybind11.h>
 #include <string>
 
+#include <pybind11/pybind11.h>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
@@ -23,13 +24,13 @@ namespace Aidge {
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance())
         .def(py::init<float, float>(),
             py::arg("epsilon"),
             py::arg("momentum"))
         .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
-        .def_static("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+        .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 07bb9f2fc16fcbefb693aeec00c380661f4a6e44..9f02e04a41b20599a6cfe878f53db04c6d5bbe34 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -20,13 +20,12 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Concat(py::module& m) {
-    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
-        .def(py::init<const IOIndex_t, const DimSize_t>(),
+    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
+        .def(py::init<const IOIndex_t, const int>(),
                 py::arg("nb_inputs"),
                 py::arg("axis"))
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
-        .def_static("get_outputs_name", &Concat_Op::getOutputsName)
-        .def_static("attributes_name", &Concat_Op::staticGetAttrsName);
+        .def_static("get_outputs_name", &Concat_Op::getOutputsName);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index c1a4f1319e4e715add01417f86d17bddadb992f1..61fb37e788021757fa6c3aced9a5f4c30fb60548 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -27,25 +27,22 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D");
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
         .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
                          const std::vector<DimSize_t> &stride_dims,
-                         const std::vector<DimSize_t> &dilation_dims,
-                         bool no_bias) {
+                         const std::vector<DimSize_t> &dilation_dims) {
             AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
             AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
             AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
         }), py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-            py::arg("no_bias") = false)
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
         .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
-        .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
         ;
@@ -75,7 +72,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
 
 
 void init_Conv(py::module &m) {
-//   declare_ConvOp<1>(m);
+  declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
 //   declare_ConvOp<3>(m);
 }
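
The Conv bindings drop the no_bias argument (the bias is now simply an optional input, per the InputCategory changes) and ConvOp1D is registered again. A hedged sketch of the new constructor call:

    import aidge_core

    conv = aidge_core.ConvOp2D(kernel_dims=[3, 3])   # stride_dims/dilation_dims default to [1, 1]
    print(conv.get_inputs_name())                     # expected: ['data_input', 'weight', 'bias']
    # Whether the bias input is actually connected is decided by the graph, not by a no_bias flag.
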
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index ce286094d6606d8b7161acf9e3fb3c6cbcbb88c9..080df1832bf92a9db9d26e1fa18b652dc70c2a42 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -28,20 +28,17 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                bool>(),
+                const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"),
-        py::arg("no_bias"))
+        py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
-  .def_static("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName)
   .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels);
 
   declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
@@ -67,7 +64,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
 
 
 void init_ConvDepthWise(py::module &m) {
-//   declare_ConvDepthWiseOp<1>(m);
+  declare_ConvDepthWiseOp<1>(m);
   declare_ConvDepthWiseOp<2>(m);
 //   declare_ConvDepthWiseOp<3>(m);
 
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 6cff90d0ad3aacf4cf8a465408eb490e3f21abda..9e0d61bc3a4d957e98db39577e120da5fe97ebea 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -15,18 +15,28 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
+
+
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
-    .def(py::init<bool>(), py::arg("no_bias"))
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
-    .def_static("attributes_name", &FC_Op::staticGetAttrsName)
-    .def("out_channels", &FC_Op::outChannels);
+    .def("out_channels", &FC_Op::outChannels)
+    // .def_property_readonly("a", &FC_Op::get_a)
+    // .def_property_readonly("a", [](const FC_Op& self) {
+    //     const AttrDict a = AttrDict(self.get_a());
+    //     return a;
+    // })
+    .def("__repr__", [](FC_Op& b) {
+        return fmt::format("Operator(type='{}')", b.Type);
+    });
 
   declare_registrable<FC_Op>(m, "FCOp");
 
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 83891624deede4b1f6f6f0c649358e9ed8de0a24..aa831d1cfe92fb720df00bb7d8dd3af7f1c1a668 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -21,7 +21,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Gather(py::module& m) {
-    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
+    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
         .def(py::init<std::int8_t,
                       const std::vector<int64_t>,
                       const std::vector<DimSize_t>>(),
@@ -29,8 +29,7 @@ void init_Gather(py::module& m) {
                 py::arg("indices"),
                 py::arg("gathered_shape"))
         .def_static("get_inputs_name", &Gather_Op::getInputsName)
-        .def_static("get_outputs_name", &Gather_Op::getOutputsName)
-        .def_static("attributes_name", &Gather_Op::staticGetAttrsName);
+        .def_static("get_outputs_name", &Gather_Op::getOutputsName);
 
     declare_registrable<Gather_Op>(m, "GatherOp");
 
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 7078ca3b0e84d7251aadbc6035e348ac9cd72571..6af8fef88e411af0a3ecbe5a771bf7af24de411a 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -11,6 +11,7 @@
 
 #include <stdio.h>
 
+#include <memory>
 #include <string>
 
 #include <pybind11/functional.h>
@@ -27,7 +28,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
         .def(py::init<const std::string&, IOIndex_t, IOIndex_t, IOIndex_t>(),
                 py::arg("type"),
@@ -54,10 +55,11 @@ void init_GenericOperator(py::module& m) {
             );
             if (kwargs){
                 std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
+                std::shared_ptr<DynamicAttributes> attr = std::dynamic_pointer_cast<DynamicAttributes>(gop->attributes());
                 for (auto item : kwargs) {
                     std::string key = py::cast<std::string>(item.first);
                     py::object value = py::reinterpret_borrow<py::object>(item.second);
-                    gop->setAttrPy(key, std::move(value));
+                    attr->setAttrPy(key, std::move(value));
                 }
             }
             return genericNode;
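
Keyword arguments passed to the GenericOperator factory are now routed through the operator's DynamicAttributes (retrieved via attributes()) instead of setAttrPy on the operator itself; per the new convention they must be snake_case. A sketch with an assumed factory signature (type, nb_data, nb_param, nb_out, name, **kwargs):

    import aidge_core

    node = aidge_core.GenericOperator("MyCustomOp", 1, 0, 1, name="custom",
                                      tile_size=16)   # snake_case attribute name required
    op = node.get_operator()
    print(op.attr.tile_size)   # hypothetical read-back through the new attr property
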
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index b859b3be5b3dd2606d227a3ca26bd1b4eb8e75a9..f46106fb3fb168631c9681d90bda857183c9bc04 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -19,11 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
         .def(py::init<float>(), py::arg("negative_slope"))
         .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
-        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
-        .def_static("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
+        .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName);
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index befba918dff37e7d47a76c0c71bf48008244c2d0..2a850cd7bfe5cca21ea1ca54b5e9ad86b880bcc2 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -27,7 +27,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -37,8 +37,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("ceil_mode"))
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
-  .def_static("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
+  .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index ee3f85b6578054512df7b0087d1a972176cd50a3..d021a79c5ff4e337bebf424465458ddabf056a56 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -51,20 +51,18 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims,
-                                                         bool no_bias)
+                                                         const std::vector<DimSize_t> &dilation_dims)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias")= false);
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 }
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
@@ -93,20 +91,18 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims,
-                                                         bool no_bias)
+                                                         const std::vector<DimSize_t> &dilation_dims)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias") = false);
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 
 }
 
@@ -180,15 +176,14 @@ void declare_LSTMOp(py::module &m) {
        py::arg("nobias") = false,
        py::arg("name") = "");
   m.def("LSTMOp", &LSTM_Op,
-       py::arg("seq_length"),
-       py::arg("nobias") = false);
+       py::arg("seq_length"));
 }
 
 void init_MetaOperatorDefs(py::module &m) {
-//   declare_PaddedConvOp<1>(m);
+  declare_PaddedConvOp<1>(m);
   declare_PaddedConvOp<2>(m);
 //   declare_PaddedConvOp<3>(m);
-//   declare_PaddedConvDepthWiseOp<1>(m);
+  declare_PaddedConvDepthWiseOp<1>(m);
   declare_PaddedConvDepthWiseOp<2>(m);
 //   declare_PaddedConvDepthWiseOp<3>(m);
 //   declare_PaddedAvgPoolingOp<1>(m);
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 2e087a3b9cb38e773971fc1a5e695835ae174a00..2b2f30f14931fd041bfb4ec1a712e5c9419fdf22 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -24,7 +24,18 @@
 namespace py = pybind11;
 namespace Aidge {
 void init_Operator(py::module& m){
+    py::enum_<OperatorType>(m, "OperatorType")
+        .value("Data", OperatorType::Data)
+        .value("Tensor", OperatorType::Tensor);
+
+    py::enum_<InputCategory>(m, "InputCategory")
+        .value("Data", InputCategory::Data)
+        .value("Param", InputCategory::Param)
+        .value("OptionalData", InputCategory::OptionalData)
+        .value("OptionalParam", InputCategory::OptionalParam);
+
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
+    .def("__repr__", &Operator::repr)
     .def("backend", &Operator::backend)
     .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
@@ -32,9 +43,14 @@ void init_Operator(py::module& m){
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
     .def("nb_inputs", &Operator::nbInputs)
-    .def("nb_data", &Operator::nbData)
-    .def("nb_param", &Operator::nbParam)
     .def("nb_outputs", &Operator::nbOutputs)
+    .def("input_category", &Operator::inputCategory, py::arg("idx"),
+    R"mydelimiter(
+    Category of a specific input (Data or Param, either of which may be optional).
+    Data inputs are distinct from inputs expecting parameters (weights or bias).
+
+    :rtype: InputCategory
+    )mydelimiter")
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
     .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
     .def("set_backend", &Operator::setBackend, py::arg("name"), py::arg("device") = 0)
@@ -45,6 +61,7 @@ void init_Operator(py::module& m){
     .def("get_impl", &Operator::getImpl)
     .def("get_hook", &Operator::getHook)
     .def("add_hook", &Operator::addHook)
+    .def_property_readonly("attr", &Operator::attributes)
     ;
 }
 }
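Note: the new OperatorType/InputCategory enums, the input_category() method and the read-only attr property make operator introspection possible from Python. A minimal sketch, using SoftmaxOp purely for illustration (its constructor binding appears further down in this patch):

    import aidge_core

    op = aidge_core.SoftmaxOp(axis=1)
    for idx in range(op.nb_inputs()):
        cat = op.input_category(idx)
        if cat in (aidge_core.InputCategory.Data, aidge_core.InputCategory.OptionalData):
            print(f"input #{idx} carries data")
    print(op.attr)    # Attributes object exposed through the new 'attr' property
    print(repr(op))   # Operator::repr exposed as __repr__
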
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 2bb635635c0be456071507aaf0bfbb76590f9a66..3df203ed52967e3dbc393769276015a7fe0e016f 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -26,7 +26,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
   const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
-  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
+  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
@@ -37,7 +37,6 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
-    .def_static("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
     ;
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
@@ -63,6 +62,6 @@ void init_Pad(py::module &m) {
     .export_values();
   declare_PadOp<1>(m);
   declare_PadOp<2>(m);
-  declare_PadOp<3>(m);
+  //declare_PadOp<3>(m);
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index d8873636d029435706cfb9766262ae0b8409d8a5..0c3b3f38803735d2df632496382e86a0c9f2735d 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -19,7 +19,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Pop(py::module& m) {
-    py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor, Attributes>(m, "PopOp", py::multiple_inheritance())
+    py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor>(m, "PopOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Pop_Op::getInputsName)
     .def_static("get_outputs_name", &Pop_Op::getOutputsName);
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 71347554fdc9cd937b1f14df16e370db2f77a267..30279dc477a0badbd5dc361ef7b5d071fa7b8cbc 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -31,15 +31,14 @@ void declare_Producer(py::module &m) {
 
 
 void init_Producer(py::module &m) {
-    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>(
+    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, OperatorTensor>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
-    .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
-    .def("dims", &Producer_Op::dims)
-    .def_static("get_inputs_name", &Producer_Op::getInputsName)
-    .def_static("get_outputs_name", &Producer_Op::getOutputsName)
-    .def_static("attributes_name", &Producer_Op::staticGetAttrsName);
+        .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
+        .def("dims", &Producer_Op::dims)
+        .def_static("get_inputs_name", &Producer_Op::getInputsName)
+        .def_static("get_outputs_name", &Producer_Op::getOutputsName);
 
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(
                                         const std::shared_ptr<Tensor>,
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 00201c9bdf4ecd7ad76202c2fe78180317b736dd..3023c077e2f3695902ca76dfa21831749f0ca82e 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -26,12 +26,11 @@ namespace Aidge {
 
 void declare_ReduceMeanOp(py::module &m) {
   const std::string pyClassName("ReduceMeanOp");
-  py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, Attributes, OperatorTensor>(
+  py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, OperatorTensor>(
     m, pyClassName.c_str(), py::multiple_inheritance())
     .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
-    .def_static("attributes_name", &ReduceMean_Op::staticGetAttrsName)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index 5a07de2f00399b761c0652e5dcdccdc0d49938de..89d93134ac2f590bcb067aa6936081c16fc1e2a3 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -19,10 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Reshape(py::module& m) {
-    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Attributes, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
-    .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
-    .def_static("get_inputs_name", &Reshape_Op::getInputsName)
-    .def_static("get_outputs_name", &Reshape_Op::getOutputsName);
+    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
+        .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
+        .def_static("get_inputs_name", &Reshape_Op::getInputsName)
+        .def_static("get_outputs_name", &Reshape_Op::getOutputsName);
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a925af8cf357dabc09f4e8e3c39af9519b4ed550
--- /dev/null
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -0,0 +1,29 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Resize.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Resize(py::module& m) {
+    py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
+        .def_static("get_inputs_name", &Resize_Op::getInputsName)
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName);
+
+    declare_registrable<Resize_Op>(m, "ResizeOp");
+
+    m.def("Resize", &Resize, py::arg("name") = "");
+}
+}  // namespace Aidge
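Note: a minimal sketch of the newly bound Resize operator from Python; node wiring and the actual resize inputs are left out, so treat this as illustrative only:

    import aidge_core

    resize_node = aidge_core.Resize(name="resize0")
    print(aidge_core.ResizeOp.get_inputs_name())
    print(aidge_core.ResizeOp.get_outputs_name())
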
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index 0660cdb003ed4d5946f54786c0a51d9051d83d5a..31e6c0b08194fbb8b6ec2270e8127a2f838ba78f 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -21,11 +21,10 @@ namespace Aidge {
 
 void init_Scaling(py::module& m)
 {
-    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, Attributes, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
-    .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
-    .def_static("get_inputs_name", &Scaling_Op::getInputsName)
-    .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
-    .def_static("attributes_name", &Scaling_Op::staticGetAttrsName);
+    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
+        .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
+        .def_static("get_inputs_name", &Scaling_Op::getInputsName)
+        .def_static("get_outputs_name", &Scaling_Op::getOutputsName);
     declare_registrable<Scaling_Op>(m, "ScalingOp");
     m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index dbae1d95d81ef65d27167bcd0774366dcc41b325..4e1d4203e48f714746587c9f209b4d28bfecb439 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -9,11 +9,10 @@
  *
  ********************************************************************************/
 
+#include <cstdint>  // std::int64_t
+
 #include <pybind11/pybind11.h>
-#include <string>
-#include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 
@@ -21,14 +20,13 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Shape(py::module& m) {
-    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, Attributes, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance())
-        .def(py::init<std::int64_t,
-                      std::int64_t>(),
+    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance())
+        .def(py::init<const std::int64_t,
+                      const std::int64_t>(),
                 py::arg("start"),
                 py::arg("end"))
         .def_static("get_inputs_name", &Shape_Op::getInputsName)
-        .def_static("get_outputs_name", &Shape_Op::getOutputsName)
-        .def_static("attributes_name", &Shape_Op::staticGetAttrsName);
+        .def_static("get_outputs_name", &Shape_Op::getOutputsName);
 
     declare_registrable<Shape_Op>(m, "ShapeOp");
 
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index becb6f35fb7413c042f6a902aadb602e4547ee01..f27e469d84c463ec48d1f9484807a8c93b7a5f4d 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -20,11 +20,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
-    .def(py::init<std::size_t>(), py::arg("axis"))
-    .def_static("get_inputs_name", &Softmax_Op::getInputsName)
-    .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
-    .def_static("attributes_name", &Softmax_Op::staticGetAttrsName);
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
+        .def(py::init<std::size_t>(), py::arg("axis"))
+        .def_static("get_inputs_name", &Softmax_Op::getInputsName)
+        .def_static("get_outputs_name", &Softmax_Op::getOutputsName);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index f3c000291dfca954bbed93b9400ac0bd8df8025b..c0c3ad617bef3eda3e283667944ac423cd10a622 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -27,12 +27,11 @@ namespace Aidge {
 
 void declare_Transpose(py::module &m) {
   const std::string pyClassName("TransposeOp");
-  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, Attributes, OperatorTensor>(
+  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, OperatorTensor>(
     m, "TransposeOp", py::multiple_inheritance())
-  .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
-  .def_static("get_inputs_name", &Transpose_Op::getInputsName)
-  .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
-  .def_static("attributes_name", &Transpose_Op::staticGetAttrsName);
+    .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
+    .def_static("get_inputs_name", &Transpose_Op::getInputsName)
+    .def_static("get_outputs_name", &Transpose_Op::getOutputsName);
   declare_registrable<Transpose_Op>(m, pyClassName);
   m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
 }
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 42e29fd43324d12ea4cac2c16c88a056903b7c54..9443ed55eaaf6dc04ad9ee4612ed9d491aed54ae 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -51,6 +51,7 @@ void init_Pow(py::module&);
 void init_ReduceMean(py::module&);
 void init_ReLU(py::module&);
 void init_Reshape(py::module&);
+void init_Resize(py::module&);
 void init_Scaling(py::module&);
 void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
@@ -121,6 +122,7 @@ void init_Aidge(py::module& m) {
     init_ReduceMean(m);
     init_ReLU(m);
     init_Reshape(m);
+    init_Resize(m);
     init_Scaling(m);
     init_Shape(m);
     init_Sigmoid(m);
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp
index e65b790d3eba6072e3e1b112c7d841959d4a5672..ac56fb4b43eb5b0a737157ec9e64c6771a692816 100644
--- a/python_binding/recipes/pybind_GraphViewHelper.cpp
+++ b/python_binding/recipes/pybind_GraphViewHelper.cpp
@@ -24,6 +24,5 @@ namespace py = pybind11;
 namespace Aidge {
 void init_GraphViewHelper(py::module &m) {
     m.def("producers", &producers, py::arg("graphview"));
-    m.def("compile_gradient", &compile_gradient, py::arg("graphview"));
 }
 } // namespace Aidge
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index 87d43727512a74aa88eeb302d95cbfc46b71b01e..ac35ce0a62408a69637a4160c9a008aba9dceb66 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -35,7 +35,7 @@ void init_Scheduler(py::module& m){
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("data")=std::vector<Tensor>())
-    .def("backward", &SequentialScheduler::backward, py::arg("instanciate_grad")=true)
+    .def("backward", &SequentialScheduler::backward)
     ;
 
     py::class_<ParallelScheduler, std::shared_ptr<ParallelScheduler>, Scheduler>(m, "ParallelScheduler")
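Note: since backward() no longer takes instanciate_grad, a forward/backward round trip from Python reduces to the following sketch (graph is assumed to be an already-built aidge_core.GraphView):

    scheduler = aidge_core.SequentialScheduler(graph)
    scheduler.forward(forward_dims=True)   # 'data' defaults to an empty list of Tensors
    scheduler.backward()                   # no instanciate_grad argument anymore
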
diff --git a/python_binding/utils/pybind_Attributes.cpp b/python_binding/utils/pybind_Attributes.cpp
index bfce891176822a3b1c07b1ded0c46c9c94a43c0a..7f5dde63c4835eb694d5fd2d571d7c9c1fd5a9ac 100644
--- a/python_binding/utils/pybind_Attributes.cpp
+++ b/python_binding/utils/pybind_Attributes.cpp
@@ -1,31 +1,47 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
+
 DynamicAttributes test_DynamicAttributes_binding() {
     DynamicAttributes attrs;
-    attrs.addAttr<int>("a", 42);
-    attrs.addAttr<std::string>("b", "test");
-    attrs.addAttr<std::vector<bool>>("c", {true, false, true});
+    attrs.addAttr<int>("A", 42);
+    attrs.addAttr<std::string>("B", "test");
+    attrs.addAttr<std::vector<bool>>("C", {true, false, true});
     return attrs;
 }
 
 double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
-    return attrs.getAttr<double>("d");
+    return attrs.getAttr<double>("D");
 }
 
 void init_Attributes(py::module& m){
     py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
-    .def("has_attr", &Attributes::hasAttr, py::arg("name"))
-    .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
-    .def("get_attrs_name", &Attributes::getAttrsName)
+    .def("has_attr", &Attributes::hasAttrPy, py::arg("name"))
     .def("get_attr", &Attributes::getAttrPy, py::arg("name"))
     .def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
     .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
-    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"));
+    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("dict", &Attributes::dict)
+    .def("__str__", &Attributes::str)
+    .def("__repr__", &Attributes::repr);
+
 
     py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
     .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
@@ -35,5 +51,4 @@ void init_Attributes(py::module& m){
     m.def("test_DynamicAttributes_binding_check", &test_DynamicAttributes_binding_check, py::arg("attrs"));
 }
 
-}
-
+} // namespace Aidge
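Note: Attributes now exposes dict(), __str__ and __repr__, and lookups go through the *Py accessors. A minimal sketch reusing the test helpers of this file, assuming test_DynamicAttributes_binding is exposed on the module alongside its _check counterpart (its m.def sits outside the shown hunk); attribute names follow the upper-case spelling introduced above:

    import aidge_core

    attrs = aidge_core.test_DynamicAttributes_binding()
    assert attrs.has_attr("A") and attrs.get_attr("A") == 42
    attrs.set_attr("D", 3.14)      # also reachable as attrs.D = 3.14 via __setattr__
    assert aidge_core.test_DynamicAttributes_binding_check(attrs) == 3.14
    print(attrs.dict())            # attributes as a plain Python dict
    print(str(attrs))              # new __str__ binding
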
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 8a5b40e44308111c5778c5260155b644234103c8..de200300a99bb33180103608238855b2f5604145 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -27,10 +27,6 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend
 }
 
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
-        "a valid input is required at index {} for operator type {}",
-        inputIdx, mOp.type());
-
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
         if (!input->empty()) {
@@ -48,10 +44,6 @@ Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inpu
 }
 
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
-        "a valid input is required at index {} for operator type {}",
-        inputIdx, mOp.type());
-
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
         if (!input->empty()) {
@@ -73,10 +65,6 @@ Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) co
 
 Aidge::Elts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    AIDGE_ASSERT(mOp.getRawOutput(outputIdx),
-        "a valid output is required at index {} for operator type {}",
-        outputIdx, mOp.type());
-
     if (mOp.getRawOutput(outputIdx)) {
         const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
         if (!output->empty()) {
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 5124d41f575b0ebf7f3c6cf258900e0ae656d213..5a11aa20e03bef274f784788dee1ef047cafba42 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -46,23 +46,29 @@ const std::shared_ptr<Aidge::Node> Aidge::GraphView::operator[](const std::strin
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::GraphView::operator()(
-    const std::vector<Aidge::Connector> ctors) {
+    const std::vector<Aidge::Connector> ctors)
+{
   // TODO: allow for multiple inputNodes?
-  assert((inputNodes().size() == 1U) && "Too many input Nodes for the GraphView, undefined behaviour");
+  AIDGE_ASSERT(inputNodes().size() == 1U, "Multiple input Nodes for the GraphView is not supported for Connectors");
   std::shared_ptr<Node> inNode = *inputNodes().begin();
-  assert((ctors.size() == static_cast<std::size_t>(inNode->nbData())) && "Wrong number of arguments.\n");
-  for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) {
-    assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
-    (void)input; // avoid unused warning
-  }
 
-  IOIndex_t inID = 0;
-  for (const Connector &ctor : ctors) {
-    assert((ctor.node() != nullptr) &&
-           "Input Connector must be associated with a node");
-    ctor.node()->addChild(shared_from_this(), static_cast<std::size_t>(ctor.index()),
-                          {inNode, inID++});
+  IOIndex_t ctorIdx = 0;
+  const auto& inputs = inNode->inputs();
+  for (IOIndex_t idx = 0; idx < inNode->nbInputs(); ++idx) {
+    if (inNode->inputCategory(idx) == InputCategory::Data || inNode->inputCategory(idx) == InputCategory::OptionalData) {
+      if (ctorIdx < ctors.size()) {
+        AIDGE_ASSERT(ctors[ctorIdx].node() != nullptr, "Input Connector #{} must be associated with a node", ctorIdx);
+        AIDGE_ASSERT(inputs[idx].second == gk_IODefaultIndex, "Data input#{} connection is not free.", idx);
+        ctors[ctorIdx].node()->addChild(shared_from_this(), static_cast<std::size_t>(ctors[ctorIdx].index()),
+                              {inNode, idx});
+        ++ctorIdx;
+      }
+      else {
+        AIDGE_ASSERT(inNode->inputCategory(idx) == InputCategory::OptionalData, "Missing an input connector for non-optional Data input#{}", idx);
+      }
+    }
   }
+  AIDGE_ASSERT(ctorIdx == ctors.size(), "Too many input connectors ({}) vs available node inputs ({}).", ctors.size(), ctorIdx);
   return Connector(*(outputNodes().begin()));
 }
 
@@ -398,14 +404,21 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
 }
 
 bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
+        // If dims are provided, create dummy input tensors (only where no tensor is already associated) to propagate dimensions
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
     if (!dims.empty()){
       AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of given dimensions ({}) and graph inputs ({})", dims.size(), mInputNodes.size());
       for (std::size_t i = 0; i < dims.size(); ++i){
-        auto tensor = std::make_shared<Tensor>(dims[i]);
-        mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+        const auto& currentTensorPtr =
+            std::dynamic_pointer_cast<OperatorTensor>(mInputNodes[i].first->getOperator())->getInput(mInputNodes[i].second);
+        if (currentTensorPtr) { // tensor detected
+            AIDGE_ASSERT(currentTensorPtr->dims() == dims[i], "Tensor of unexpected size provided.");
+        } else {
+            auto tensor = std::make_shared<Tensor>(dims[i]);
+            mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+        }
       }
     }
 
@@ -418,7 +431,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) == inputI.first->getOperator()->getRawOutput(inputI.second),
                   "Input#{} for node {} ({}) is not properly connected to output#{} of node {} ({}): Data or Tensor mismatch!",
                     i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
-            } else {
+            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData && nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
                 // Input is missing
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
@@ -583,15 +596,17 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
 
   // add learnable parameters to the graph
   if (includeLearnableParam) {
-    for (IOIndex_t i = node->nbData(); i < node->nbInputs(); ++i) {
-      std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
-      if (parentNode) {
-          parentNode->addView(shared_from_this());
-          mNodes.insert(parentNode);
-          if (!(parentNode->name()).empty())
-            mNodeRegistry.insert(std::make_pair(parentNode->name(), parentNode));
-          // check if the parentNode is an input/output node
-          updateInputsOutputsNew(parentNode);
+    for (IOIndex_t i = 0; i < node->nbInputs(); ++i) {
+      if (node->inputCategory(i) == InputCategory::Param || node->inputCategory(i) == InputCategory::OptionalParam) {
+        std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
+        if (parentNode) {
+            parentNode->addView(shared_from_this());
+            mNodes.insert(parentNode);
+            if (!(parentNode->name()).empty())
+              mNodeRegistry.insert(std::make_pair(parentNode->name(), parentNode));
+            // check if the parentNode is an input/output node
+            updateInputsOutputsNew(parentNode);
+        }
       }
     }
   }
@@ -879,29 +894,31 @@ Aidge::GraphView::getNode(const std::string& nodeName) const {
 void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnableParam) {
   // remove learnable params
   if (includeLearnableParam) {
-    for (IOIndex_t i = nodePtr->nbData(); i < nodePtr->nbInputs(); ++i) {
-      auto inputI = nodePtr->input(i);
-      if (inputI.first != nullptr) {
-        bool removeNode = true;
-        for (const auto& parentOutput : inputI.first->outputs()) {
-          for (const auto& childOfParentOutput : parentOutput) {
-            // only remove the learnable parameter if not related to any other Node in the GraphView
-            if (childOfParentOutput.first != nodePtr) {
-              removeNode = false;
-              break;
+    for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
+      if (nodePtr->inputCategory(i) == InputCategory::Param || nodePtr->inputCategory(i) == InputCategory::OptionalParam) {
+        auto inputI = nodePtr->input(i);
+        if (inputI.first != nullptr) {
+          bool removeNode = true;
+          for (const auto& parentOutput : inputI.first->outputs()) {
+            for (const auto& childOfParentOutput : parentOutput) {
+              // only remove the learnable parameter if not related to any other Node in the GraphView
+              if (childOfParentOutput.first != nodePtr) {
+                removeNode = false;
+                break;
+              }
             }
           }
-        }
-        if (removeNode) {
-          // assert Learnable Parameter in the GraphView scope
-          if (mNodes.find(inputI.first) != mNodes.end()) {
-            mNodes.erase(inputI.first);
-            inputI.first->removeView(shared_from_this());
-          }
-          if (!inputI.first->name().empty()) { mNodeRegistry.erase(inputI.first->name()); }
+          if (removeNode) {
+            // assert Learnable Parameter in the GraphView scope
+            if (mNodes.find(inputI.first) != mNodes.end()) {
+              mNodes.erase(inputI.first);
+              inputI.first->removeView(shared_from_this());
+            }
+            if (!inputI.first->name().empty()) { mNodeRegistry.erase(inputI.first->name()); }
 
-          // check if the node was an input/output node
-          updateInputsOutputsDelete(inputI.first);
+            // check if the node was an input/output node
+            updateInputsOutputsDelete(inputI.first);
+          }
         }
       }
     }
@@ -1045,6 +1062,10 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
                       for (const auto& child : outputChildren[i]) {
                         inputParents[i].first -> addChild(child.first, inputParents[i].second, child.second);
                       }
+                    } else {
+                      for (const auto& child : outputChildren[i]) {
+                        child.first->getOperator()->resetInput(child.second);
+                      }
                     }
                 }
             }
@@ -1064,7 +1085,10 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
             // Case 2
             if ((oldOIn.size() == 1) && (inputParents[0].first)) {
                 for (std::size_t i = 0; i < newOIn.size(); ++i) {
-                    inputParents[0].first -> addChild(newOIn[i].first, inputParents[0].second, newOIn[i].second);
+                    // Only re-connect the same input category
+                    if (newOIn[i].first->inputCategory(newOIn[i].second) == oldOIn[0].first->inputCategory(oldOIn[0].second)) {
+                      inputParents[0].first -> addChild(newOIn[i].first, inputParents[0].second, newOIn[i].second);
+                    }
                 }
             } else {
                 for (std::size_t i = 0; i < oldOIn.size(); ++i) {
@@ -1350,7 +1374,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     auto clonedNode = cloneNode(node_ptr);
     if (clonedNode == nullptr) {
       AIDGE_ASSERT(node_ptr->getChildren().size() <= 1, "deleted nodes in GraphView::clone() cannot have multiple children");
-      AIDGE_ASSERT(node_ptr->nbData() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
+      AIDGE_ASSERT(node_ptr->dataInputs().size() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
     }
     oldToNewNodes[node_ptr] = clonedNode;
   }
@@ -1368,8 +1392,8 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
         while (oldToNewNodes[parent.first] == nullptr) {
           // Find next valid parent in line, going backward in the graph
           AIDGE_INTERNAL_ASSERT(parent.first->getChildren().size() == 1);
-          AIDGE_INTERNAL_ASSERT(parent.first->nbData() <= 1);
           const auto& parents = parent.first->dataInputs();
+          AIDGE_INTERNAL_ASSERT(parents.size() <= 1);
 
           if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
             && oldToNewNodes.find(parents[0].first) != oldToNewNodes.end()) // parent is in the GraphView
@@ -1450,9 +1474,9 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
   for (auto it = newOutputNodes.begin(); it != newOutputNodes.end(); ) {
     // If output node was removed, find previous valid output
     while (oldToNewNodes[it->first] == nullptr) {
-      // Removed node should have only one connected data input, otherwise cloning is invalid
-      AIDGE_INTERNAL_ASSERT(it->first->nbData() <= 1);
       auto parents = it->first->dataInputs();
+      // Removed node should have only one connected data input, otherwise cloning is invalid
+      AIDGE_INTERNAL_ASSERT(parents.size() <= 1);
 
       if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
         && oldToNewNodes.find(parents[0].first) != oldToNewNodes.end()) // parent is in the GraphView
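Note: forwardDims() now only creates dummy input tensors where no tensor is already associated, and rejects a pre-set input tensor whose shape differs from the provided dims. A hedged Python-level sketch, assuming the usual aidge_core.GraphView.forward_dims binding that takes a list of input shapes:

    # graph is an aidge_core.GraphView with a single data input
    ok = graph.forward_dims([[1, 3, 224, 224]])  # creates a dummy 1x3x224x224 input tensor
    # calling it again with a different shape now trips the
    # "Tensor of unexpected size provided." assert instead of silently replacing the tensor
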
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 4fad845242979de97ca1348d9dfb9e2f73714f88..50b8be13c6faac62a8b2aecf1e767e0b83024d3c 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -38,17 +38,24 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
-    assert((ctors.size() == nbData()) && "Wrong number of arguments.\n");
-    for (std::size_t i = 0; i < nbData(); i++) {
-        assert((gk_IODefaultIndex == input(i).second) &&
-               "At least one input connection is not free.\n");
-    }
-    IOIndex_t i = 0;
-    for (const Connector& ctor : ctors) {
+    IOIndex_t idx = 0;
+    for (const auto& ctor : ctors) {
+        // Skip to next possible input idx
+        for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data && inputCategory(idx) != InputCategory::OptionalData); ++idx) {}
+
+        AIDGE_ASSERT(idx < nbInputs(), "Too many input connectors ({}) vs available node inputs.", ctors.size());
+        AIDGE_ASSERT(input(idx).second == gk_IODefaultIndex, "Data input#{} connection is not free.", idx);
+
         if (ctor.node() != nullptr) {  // ctor must be associated with a node
-            ctor.node()->addChild(shared_from_this(), ctor.index(), i++);
+            ctor.node()->addChild(shared_from_this(), ctor.index(), idx);
         }
+        ++idx;
     }
+
+    // Skip to next possible input idx
+    for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data && inputCategory(idx) != InputCategory::OptionalData); ++idx) {}
+    AIDGE_ASSERT(idx == nbInputs(), "Missing an input connector for Data input#{}", idx);
+
     return Connector(shared_from_this());
 }
 
@@ -109,10 +116,11 @@ Aidge::IOIndex_t Aidge::Node::getNbFreeDataInputs() const {
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::Node::dataInputs()
         const {
-    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbData());
-    for (std::size_t i = 0; i < static_cast<std::size_t>(nbData()); ++i) {
-        res[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]);
+    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res;
+    for (std::size_t i = 0; i < static_cast<std::size_t>(nbInputs()); ++i) {
+        if (inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData) {
+            res.push_back(std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]));
+        }
     }
     return res;
 }
@@ -328,18 +336,19 @@ bool Aidge::Node::removeChild(const std::shared_ptr<Aidge::Node> nodePtr,
 
 void Aidge::Node::resetConnections(bool includeLearnableParam) {
     // remove every parents reference to it
-    IOIndex_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbData();
-    for (IOIndex_t i = 0; i < nbRemovedInputs; ++i) {
-        std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
-        if (parent.first) {
-            // number of children linked to the parent's output
-            while (parent.first->removeChild(shared_from_this(), parent.second) == true) {
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (includeLearnableParam || inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData) {
+            std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
+            if (parent.first) {
+                // number of children linked to the parent's output
+                while (parent.first->removeChild(shared_from_this(), parent.second) == true) {
+                }
             }
+            // every reference to this object as child has been removed
+            // removing reference to parents.
+            mParents[i] = nullptr;
+            mIdOutParents[i] = gk_IODefaultIndex;
         }
-        // every reference to this object as child has been removed
-        // removing reference to parents.
-        mParents[i] = nullptr;
-        mIdOutParents[i] = gk_IODefaultIndex;
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         for (std::pair<std::shared_ptr<Node>, IOIndex_t> child : output(i)) {
diff --git a/src/graph/Testing.cpp b/src/graph/Testing.cpp
index f30ad6e25b81e1ce7768fcc201ddf00c2226eebf..774ee8912da2ddaa19583debdac063a95b5aa461 100644
--- a/src/graph/Testing.cpp
+++ b/src/graph/Testing.cpp
@@ -45,7 +45,7 @@ std::pair<Aidge::NodePtr, std::set<Aidge::NodePtr>> Aidge::RandomGraph::gen(std:
     std::vector<NodePtr> nodes(nbNodes, nullptr);
     for (auto idx : nodesSeq) {
         const std::string name = nodesType[idx] + std::to_string(idx);
-        nodes[idx] = GenericOperator(nodesType[idx], nbIOs[idx].first, 0, nbIOs[idx].second, name);
+        nodes[idx] = GenericOperator(nodesType[idx], std::vector<InputCategory>(nbIOs[idx].first, InputCategory::Data), nbIOs[idx].second, name);
     }
 
     for (std::size_t i = 0; i < nbNodes; ++i) {
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 9b77ffcbe0117292ed0aa520309febf709e8dd68..57ece07152613b831675cdecd6526d4ab26af5cb 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -33,15 +33,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
 }
 
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = (nbInputs() > 0); // do not compute anything if no input
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
         for (std::size_t i = 0; i < nbInputs(); i++) {
             inputsDims[i] = getInput(i)->dims();
@@ -70,9 +62,10 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
             }
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 07123bc88aa1da22bfa98166d6a01af8d66be98d..53ffb93269e79c0ba940f1fb0d3d94cb494ad8ce 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
 
 template <Aidge::DimIdx_t DIM>
-Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): OperatorTensor(op), Attributes_(op) {
+Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
     if (op.mImpl) {
         SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
     } else {
@@ -37,21 +40,17 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): Operator
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-    if (!(getInput(0)->empty())) {
+    if (inputsAssociated()) {
         std::array<DimSize_t, DIM + 2> outputDims;
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
         outputDims[0] = inputDims[0];
         outputDims[1] = inputDims[1];
 
-        for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                         std::floor(static_cast<float>(inputDims[dim+2] -
-                                                                this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
-                                        static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
+                                                            mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
+                                        static_cast<float>(mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
         }
         getOutput(0)->resize(outputDims);
         return true;
@@ -89,10 +88,10 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
 
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
-            inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+            inputIdxDims[2+i] *= mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
         std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
         res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 2563ef843674725dd05e77d893de3778ae4623d2..98e5c2da20fc35e18d4fd69a79cf1d87ec9d60ca 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm";
 
 template <Aidge::DimIdx_t DIM>
-Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTensor(op), Attributes_(op) {
+Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
     if (op.mImpl) {
         SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
     } else {
@@ -37,23 +40,19 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTen
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = true;
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         const DimSize_t nbFeatures =  getInput(0)->dims()[1];
-        for (std::size_t i = nbData(); i < nbInputs(); ++i) {
-            if(getInput(i)->size() != nbFeatures) {
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            if(inputCategory(i) == InputCategory::Param && getInput(i)->size() != nbFeatures) {
                 // /!\ Input size should be handled BEFORE calling this function
                 // This should raise an error
                 getInput(i)->resize({getInput(0)->dims()[1]});
             }
         }
         mOutputs[0]->resize(getInput(0)->dims());
+        return true;
     }
-    return associated;
+    return false;
 }
 
 template <Aidge::DimIdx_t DIM>
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index f1c8e25e17c80d58d444a1ddddbaa428b2fc4c41..8df153a67d2214e4435d9fa0aac6e74d53e11b12 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -27,6 +27,16 @@ void Aidge::Cast_OpImpl::forward() {
 
 const std::string Aidge::Cast_Op::Type = "Cast";
 
+Aidge::Cast_Op::Cast_Op(const DataType targetType)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<CastAttr::TargetType>(targetType)))
+{
+    mImpl = std::make_shared<Cast_OpImpl>(*this);
+    mOutputs[0]->setDataType(targetType);
+}
+
+
 void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Cast_Op>::exists({name})) {
         SET_IMPL_MACRO(Cast_Op, *this, name);
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index ee06ce69b135e11fe3ed5be8fa9f501debb6acd5..bf4bbb85be606fc857bf8d771b9ce211ca8e858e 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -20,7 +20,7 @@
 
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
-    const DimSize_t axis = op.template getAttr<DimSize_t>("Axis");
+    const DimSize_t axis = op.axis();
 
     assert(op.getInput(0) && "missing input in Concat operator");
     DataType datatypeFirstInput = op.getInput(0)->dataType();
@@ -60,36 +60,47 @@ void Aidge::Concat_OpImpl::forward() {
 const std::string Aidge::Concat_Op::Type = "Concat";
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
-    // Every input is non-empty with the same number of dimensions
-    bool associated = (getInput(0) != nullptr);
-    associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
-    auto outputDims =  getInput(0)->dims();
-    const auto firstInputNbDims = getInput(0) -> nbDims();
+    if (!inputsAssociated()) {
+        return false;
+    }
+    const std::size_t nbDimsInput0 = getInput(0)->nbDims();
+    if (nbDimsInput0 == 0) {
+        return false;
+    }
+    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is empty", type());
     for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        if (getInput(i)->nbDims() == 0) {
+            return false;
         }
+        AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
+            "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
+            i, type(), nbDimsInput0, getInput(i)->nbDims());
+    }
+    // Check validity of attributes with inputs
+    // Axis
+    std::int32_t axis = mAttributes->template getAttr<ConcatAttr::Axis>();
+    axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
+    AIDGE_ASSERT(((axis >= 0) && (axis < static_cast<std::int32_t>(nbDimsInput0))),
+                "'Axis' attribute not compatible with provided inputs.")
+    const std::size_t axis_u64 = static_cast<std::size_t>(axis);
 
-        if (getInput(i)->nbDims() == firstInputNbDims) {
-            for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
-                if (dim == getAttr<ConcatAttr::Axis>()) {
-                    outputDims[dim] += getInput(i)->dims()[dim];
-                }
-                else {
-                    associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
-                }
+    // Check validity of inputs
+    auto outputDims =  getInput(0)->dims();
+    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+        for (DimSize_t dim = 0; dim < nbDimsInput0; ++dim) {
+            if (dim == axis_u64) {
+                outputDims[axis_u64] += getInput(i)->dims()[axis_u64];
+            }
+            else {
+                AIDGE_ASSERT(getInput(i)->dims()[dim] == outputDims[dim],
+                    "Incomatible dimensions between input 0 {} and input {} {}",
+                    getInput(0)->dims(), i, getInput(i)->dims());
             }
         }
-        else {
-            associated = false;
-            break;
-        }
-    }
-    if (associated) {
-        getOutput(0)->resize(outputDims);
     }
 
-    return associated;
+    getOutput(0)->resize(outputDims);
+    return true;
 }
 
 void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
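Note: the Axis attribute of Concat is now read as a signed integer and normalized against the rank of the first input, so negative values count from the last dimension. The normalization above boils down to this small sketch (plain Python, hypothetical values):

    def resolve_axis(axis: int, nb_dims: int) -> int:
        # mirrors the check done in Concat_Op::forwardDims
        axis = axis + nb_dims if axis < 0 else axis
        assert 0 <= axis < nb_dims, "'Axis' attribute not compatible with provided inputs."
        return axis

    resolve_axis(-1, 4)   # -> 3: concatenate along the last dimension
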
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 66e1d1f5b25c2b12f73a851d87d9f91aa4940322..a33af78779971e77da4f4e910b89b9263a1af5d6 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -29,7 +29,7 @@ const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
@@ -40,15 +40,7 @@ Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = true;
-    for (IOIndex_t i = 0; i < 3; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
                     "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
@@ -57,29 +49,30 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
                     (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
                     "Wrong input size for Conv operator.");
         // check optional bias
-        if(!this->template getAttr<ConvAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == outChannels()),
                     "Wrong bias size for Conv operator.");
         std::array<DimSize_t, DIM + 2> outputDims{};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
-        for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
                                             1;
 
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
+                            static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim])));
         }
 
         outputDims[1] = outChannels();
         outputDims[0] = inputDims[0];
         mOutputs[0]->resize(outputDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 
@@ -113,18 +106,18 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         // same output value, every input channel is used
         std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
+            weightDims.push_back(mAttributes->template getAttr<ConvAttr::KernelDims>()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
@@ -135,7 +128,7 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
 
         // Bias
-        if (! this->template getAttr<ConvAttr::NoBias>()){
+        if (getInput(2)){
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
             const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
             res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
@@ -164,4 +157,5 @@ void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     }
 }
 
+template class Aidge::Conv_Op<1>;
 template class Aidge::Conv_Op<2>;
\ No newline at end of file
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 77441be414847c08452c71fc2e35c4e3e5bd3c04..342fd86195d5c2e85a63d990c4ebbb75e7f50a6b 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -30,7 +30,7 @@ const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
@@ -41,16 +41,7 @@ Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    // TODO : add a check of inputs dimensions ?
-    bool associated = true;
-    for (IOIndex_t i = 0; i < 3; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         // first check weight since it defines nbChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
                     "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
@@ -59,29 +50,30 @@ bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
                     (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
                     "Wrong input size for Conv operator.");
         // check optional bias
-        if(!this->template getAttr<ConvDepthWiseAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == nbChannels()),
                     "Wrong bias size for Conv operator.");
         std::array<DimSize_t, DIM + 2> outputDims = {};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
-        for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
                                             1;
 
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
+                            static_cast<float>(mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
         }
 
         outputDims[1] = inputDims[1];
         outputDims[0] = inputDims[0];
         mOutputs[0]->resize(outputDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 
@@ -114,17 +106,17 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         std::vector<DimSize_t> weightDims{outputDims[1], 1};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+            weightDims.push_back(mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
@@ -135,7 +127,7 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
         // Bias
-        if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
+        if (getInput(2)){
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
             const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
             res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
@@ -164,4 +156,5 @@ void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::De
     }
 }
 
+template class Aidge::ConvDepthWise_Op<1>;
 template class Aidge::ConvDepthWise_Op<2>;
\ No newline at end of file
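Note: with the NoBias attribute removed from Conv, ConvDepthWise and FC, whether a node carries a bias is now determined solely by whether its input #2 is associated with a Tensor. A minimal caller-side sketch, assuming an already-built node convNode (the variable name is illustrative, not part of this patch):

    // Query bias presence from the operator's inputs instead of a NoBias attribute.
    const auto op = std::static_pointer_cast<Aidge::OperatorTensor>(convNode->getOperator());
    const bool hasBias = (op->getInput(2) != nullptr) && !op->getInput(2)->empty();

Inside forwardDims() and computeReceptiveField(), the same test (if (getInput(2))) replaces the former attribute check, as shown in the hunks above.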
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index e6300d08c2c792c8a3eb66b307aca53f9d2acc73..387a9516077a937cca5c20ad091547b7f1c5be6f 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -23,13 +23,7 @@
 const std::string Aidge::Div_Op::Type = "Div";
 
 bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 1d53893b1e37933ef41540202b76fdcdfca08130..44d499bc7e125c757f802e086c22e1e6c72e9216 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -37,14 +37,7 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh
 }
 
 bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
-    bool associated = true;
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == 2),
                     "Wrong weight Tensor dimension: {} for FC operator (should have 2 dimensions).", getInput(1)->nbDims());
@@ -64,15 +57,16 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
                     nbInputFeatures, inChannels);
         }
         // check optional bias
-        if(!this->template getAttr<FCAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == 1) &&
                     (getInput(2)->template dims<1>()[0] == outChannels),
                     "Wrong bias size for FC operator.");
         // <batch, OutChannels>
         mOutputs[0]->resize({getInput(0)->dims()[0], outChannels});
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index b0b9a0e84882cae55a9a3c336684d43e208cb503..c28a0587a755ef0a910ec5bfdeb9caa2f1edc216 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -22,9 +22,8 @@
 
 void Aidge::Gather_OpImpl::forward() {
     const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
-    const auto axis = op.template getAttr<std::int8_t>("Axis");
 
-    const std::size_t axisIdx = static_cast<std::size_t>(axis) + (axis >= 0 ? 0 : op.getInput(0)->dims().size());
+    const std::size_t axisIdx = static_cast<std::size_t>(op.axis()) + (op.axis() >= 0 ? 0 : op.getInput(0)->dims().size());
 
     std::size_t postAxisElems = 1;
     for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
@@ -38,11 +37,11 @@ void Aidge::Gather_OpImpl::forward() {
     std::size_t outputOffset = 0;
     for (std::size_t i=0; i<preAxisElems; ++i)
     {
-        for(std::size_t j=0; j<op.template getAttr<std::vector<int64_t>>("Indices").size(); ++j)
+        for(std::size_t j = 0; j < op.indices().size(); ++j)
         {
-            const std::size_t idx = op.template getAttr<std::vector<int64_t>>("Indices")[j] >= 0 ?
-                                        static_cast<std::size_t>(op.template getAttr<std::vector<int64_t>>("Indices")[j]) :
-                                        static_cast<std::size_t>(op.template getAttr<std::vector<int64_t>>("Indices")[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
+            const std::size_t idx = op.indices()[j] >= 0 ?
+                                        static_cast<std::size_t>(op.indices()[j]) :
+                                        static_cast<std::size_t>(op.indices()[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
             op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
             outputOffset += postAxisElems;
         }
@@ -61,51 +60,48 @@ bool Aidge::Gather_Op::dimsForwarded() const {
 }
 
 bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
-    // check data input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Indices
+        if (getInput(1)) {
+            if (!this->indices().empty()) {
+                Log::notice("Gather_Op: ignoring non-empty Indices attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Gather_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            this->gatheredShape() = getInput(1)->dims();
+            this->indices().clear(); // If both are provided, the input overrides the attribute
+            this->indices().reserve(getInput(1)->size());
+            const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
+                        indices.size(),
+                        std::back_inserter(this->indices()));
+        }
 
-    if (getInput(0)->empty()) {
-        return false;
-    }
+        AIDGE_ASSERT(!this->indices().empty(), "Missing input#1 or Indices attribute");
 
-    if (getInput(1) && !getInput(1)->empty()) {
-        if (!this->template getAttr<GatherAttr::Indices>().empty()) {
-            Log::notice("Gather_Op: ignoring non-empty Indices attribute because input#1 takes precedence");
-        }
+        // Compute output dims
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-        if (!allowDataDependency) {
-            Log::warn("Gather_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
+        std::int8_t axisIdx = this->axis()>=0?
+                                this->axis():
+                                this->axis()+outDims.size();
+        outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
+        if( !this->gatheredShape().empty())
+        {
+            outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
+                            this->gatheredShape().begin(),
+                            this->gatheredShape().end());
         }
-
-        std::shared_ptr<Tensor> fallback;
-        this->template getAttr<GatherAttr::GatheredShape>() = getInput(1)->dims();
-        this->template getAttr<GatherAttr::Indices>().clear(); // If both are provided input would override attrs
-        this->template getAttr<GatherAttr::Indices>().reserve(getInput(1)->size());
-        const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
-                    indices.size(),
-                    std::back_inserter(this->template getAttr<GatherAttr::Indices>()));
+        mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    AIDGE_ASSERT(!this->template getAttr<GatherAttr::Indices>().empty(), "Missing input#1 or Indices attribute");
-
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-
-    std::int8_t axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?
-                            this->template getAttr<GatherAttr::Axis>():
-                            this->template getAttr<GatherAttr::Axis>()+outDims.size();
-    outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
-    if( !this->template getAttr<GatherAttr::GatheredShape>().empty())
-    {
-        outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
-                        this->template getAttr<GatherAttr::GatheredShape>().begin(),
-                        this->template getAttr<GatherAttr::GatheredShape>().end());
-    }
-    mOutputs[0]->resize(outDims);
-    return true;
+    return false;
 }
 
 void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
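Note on the Gather change above: when indices arrive through the optional input #1, the output shape becomes data dependent, so forwardDims() returns false unless data dependency is explicitly allowed. A minimal sketch, assuming a GraphView gv containing the Gather node and that GraphView::forwardDims() forwards this flag (an assumption, not shown in this patch):

    // Shapes that depend on input data (Gather input #1, Reshape input #1, Resize inputs #2/#3)
    // are only resolved when data dependency is explicitly allowed.
    gv->forwardDims(/*dims=*/{}, /*allowDataDependency=*/true);

Once allowed, the operator copies input #1 into its Indices attribute through refCastFrom(..., NativeType<int64_t>::type, "cpu"), so the remaining shape computation only reads this->indices().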
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index a770e1602b7fc33fc47a65c51c2dcf05d5840ba4..d49e1f0838f623bca1546e54ea4f4e470d70e1c5 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -26,9 +26,10 @@ const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Inpu
 }
 
 bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (mForwardDims) {
+    if (mForwardDims && inputsAssociated(false)) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>());
         for (std::size_t i = 0; i < nbInputs(); ++i) {
+            // Check for input, as it may be optional
             if (getInput(i)) {
                 inputsDims[i] = getInput(i)->dims();
             }
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index b09426f8f835eda5600b630488ef18c5b08ba32a..1632c8a7677c884194494269e1a8cd93e7ef7822 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -22,26 +22,20 @@
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
 bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
-  // error checking
-  if (!getInput(0)) {
-    AIDGE_THROW_OR_ABORT(std::runtime_error,
-                         "GlobalAveragePooling : The input was not connected");
-  }
-  else if (!getInput(0)->empty()) {
-    AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
-                 "GlobalAveragePooling :  needs at least a 3 dimensions input, "
-                 "number of input dim : {}",
-                 getInput(0)->dims().size());
-    // Global average pooling takes each filter, averages its values and uses
-    // it as an output(Much like a fancier flatten). 1st dim is batch 2nd is
-    // number of filter
-    const std::vector<DimSize_t> out_dims{getInput(0)->dims().at(0),
-                                          getInput(0)->dims().at(1)};
-    mOutputs[0]->resize(out_dims);
-    return true;
-  }
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
+                    "GlobalAveragePooling: needs at least a 3-dimensional input, "
+                    "number of input dims: {}",
+                    getInput(0)->dims().size());
+        // Global average pooling takes each filter, averages its values and uses
+        // it as an output (much like a fancier flatten). 1st dim is the batch,
+        // 2nd is the number of filters
+        mOutputs[0]->resize({getInput(0)->dims().at(0),
+                             getInput(0)->dims().at(1)});
+        return true;
+    }
 
-  return false;
+    return false;
 }
 
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 8f7548155cde4c7187f7a7fe96a44c4accd2c302..17b4960dfdfc9de199cc25b0119a5cb000bcf48c 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -21,58 +21,57 @@
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
 bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
-    }
-    if (getInput(0)->empty() && getInput(1)->empty()) {
-        // both inputs are scalar
-        mOutputs[0]->resize({});
-        return true;
-    }
-    else if (!getInput(0)->empty() && !getInput(1)->empty())
-    {
-        std::vector<std::size_t> dims0 = getInput(0)->dims();
-        std::vector<std::size_t> dims1 = getInput(1)->dims();
+    if (inputsAssociated(false)) {
+        if (getInput(0)->empty() && getInput(1)->empty()) {
+            // both inputs are scalar
+            mOutputs[0]->resize({});
+            return true;
+        }
+        else if (!getInput(0)->empty() && !getInput(1)->empty())
+        {
+            std::vector<std::size_t> dims0 = getInput(0)->dims();
+            std::vector<std::size_t> dims1 = getInput(1)->dims();
 
-        // keep second-to-last dimension of dims0
-        const bool keepDim0 = dims0.size() > 1;
-        // keep last dimension of dims1
-        const bool keepDim1 = dims1.size() > 1;
+            // keep second-to-last dimension of dims0
+            const bool keepDim0 = dims0.size() > 1;
+            // keep last dimension of dims1
+            const bool keepDim1 = dims1.size() > 1;
 
-        if (dims0.size() == 1) {
-            dims0.insert(dims0.cbegin(), 1);
-        }
-        if (dims1.size() == 1) {
-            dims1.push_back(1);
-        }
-        const std::size_t dims_size = std::max(dims0.size(), dims1.size());
+            if (dims0.size() == 1) {
+                dims0.insert(dims0.cbegin(), 1);
+            }
+            if (dims1.size() == 1) {
+                dims1.push_back(1);
+            }
+            const std::size_t dims_size = std::max(dims0.size(), dims1.size());
 
 
-        if (dims0.size() > dims1.size()) {
-            dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
-        }
-        else if (dims1.size() > dims0.size()) {
-            dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
-        }
+            if (dims0.size() > dims1.size()) {
+                dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
+            }
+            else if (dims1.size() > dims0.size()) {
+                dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+            }
 
-        AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
+            AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
 
-        std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
-        for (std::size_t i = 0; i < dims_size-2; ++i) {
-            AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
-            outDims[i] = std::max(dims0[i], dims1[i]);
-        }
+            std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
+            for (std::size_t i = 0; i < dims_size-2; ++i) {
+                AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
+                outDims[i] = std::max(dims0[i], dims1[i]);
+            }
 
-        // use keepDim0 instead of dims0.size() because dims0 has been modified
-        if (keepDim0)
-            outDims.push_back(dims0[dims_size-2]);
-        if (keepDim1)
-            outDims.push_back(dims1[dims_size-1]);
+            // use keepDim0 instead of dims0.size() because dims0 has been modified
+            if (keepDim0)
+                outDims.push_back(dims0[dims_size-2]);
+            if (keepDim1)
+                outDims.push_back(dims1[dims_size-1]);
 
-        mOutputs[0]->resize(outDims);
-        return true;
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
     }
-    
+
     return false;
 }
 
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index e08b5f1054f07a9dcc1722d219ebce022f994d61..adf79b5c69e991ad7979184c313448e4288a8ecb 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -24,14 +24,13 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
     Aidge::IOIndex_t inputIdx) const
 {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
 
-    if (scheduleStep == 0 && inputIdx == 0) {
+    if (op.scheduleStep() == 0 && inputIdx == 0) {
         // No data input is required for the initial step.
         // Initialization data is required however.
         return Elts_t::NoneElts();
     }
-    else if (scheduleStep > 0 && inputIdx == 1) {
+    else if (op.scheduleStep() > 0 && inputIdx == 1) {
         // No initialization data is required after the initial step.
         return Elts_t::NoneElts();
     }
@@ -45,10 +44,8 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t o
     assert(mOp.getRawOutput(outputIdx) && "requires valid output");
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
 
-    if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) {
+    if ((op.endStep() > 0) && (outputIdx == 1) && (op.scheduleStep() >= op.endStep())) {
         return Elts_t::NoneElts();
     }
     else {
@@ -60,18 +57,15 @@ void Aidge::Memorize_OpImpl::updateConsummerProducer() {
     OperatorImpl::updateConsummerProducer();
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded");
+    AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
 }
 
 void Aidge::Memorize_OpImpl::forward() {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded");
 
-    if (forwardStep == 0) {
+    AIDGE_ASSERT((op.endStep() == 0) || (op.forwardStep() <= op.endStep()), "cannot forward anymore, number of cycles exceeded");
+
+    if (op.forwardStep() == 0) {
         op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
     }
     else {
@@ -83,28 +77,24 @@ const std::string Aidge::Memorize_Op::Type = "Memorize";
 
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
-    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ScheduleStep>();
+    mAttributes->template getAttr<MemorizeAttr::ForwardStep>() = 0;
 }
 
 bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
-    for (size_t i = 0; i < 2; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+    if (inputsAssociated(false)) {
+        // Only require one of the inputs to have its dims defined;
+        // otherwise, forwardDims() won't converge!
+        if (!(getInput(0)->empty())) {
+            const auto expectedDims =  getInput(0)->dims();
+            mOutputs[0]->resize(expectedDims);
+            return true;
+        }
+        else if (!(getInput(1)->empty())) {
+            const auto expectedDims =  getInput(1)->dims();
+            mOutputs[0]->resize(expectedDims);
+            return true;
         }
-    }
-
-    // Only require one of the input to have dims defined
-    // Otherwise, forwardDims() won't converge!
-    if (!(getInput(0)->empty())) {
-        const auto expectedDims =  getInput(0)->dims();
-        mOutputs[0]->resize(expectedDims);
-        return true;
-    }
-    else if (!(getInput(1)->empty())) {
-        const auto expectedDims =  getInput(1)->dims();
-        mOutputs[0]->resize(expectedDims);
-        return true;
     }
 
     return false;
@@ -132,6 +122,6 @@ void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
 
 void Aidge::Memorize_Op::forward() {
     Operator::forward();
-    ++this->template getAttr<MemorizeAttr::ForwardStep>();
-    this->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
+    mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 1397b69b9c126c0e2d0ec84bf900a320b95f0d80..e7c50033797c7c984b6b8da69d30f005bc69e70c 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -20,15 +20,16 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph)
-    : OperatorTensor(type, graph->dataInputs().size(), (graph->getOrderedInputs().size() - graph->dataInputs().size()), graph->getOrderedOutputs().size()),
+    : OperatorTensor(type, [graph]() {
+        std::vector<InputCategory> inputsCategory;
+        for (const auto& in : graph->getOrderedInputs()) {
+            inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
+        }
+        return inputsCategory;
+    }(), graph->getOrderedOutputs().size()),
         mGraph(graph)
 {
-    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedInputs().size());
-    for (std::size_t i = 0; i < mInputs.size(); ++i) {
-        mInputs[i] = std::make_shared<Tensor>();
-    }
     // Associate outputs to micro-graph outputs for custom implementation
-    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedOutputs().size());
     for (size_t outputIdx = 0; outputIdx < mOutputs.size(); ++outputIdx) {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
         if (outputOp.first) {
@@ -133,6 +134,20 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) con
     }
 }
 
+void Aidge::MetaOperator_Op::resetConsummerProducer() {
+    if (mImpl) {
+        mImpl->resetConsummerProducer();
+    }
+    else {
+        if (!mScheduler) {
+            // Lazy initialization
+            mScheduler = std::make_shared<SequentialScheduler>(mGraph, mUpperNode.lock());
+        }
+
+        mScheduler->resetScheduling();
+    }
+}
+
 void Aidge::MetaOperator_Op::updateConsummerProducer() {
     if (mImpl) {
         mImpl->updateConsummerProducer();
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index cd993f9e5cd127a005101284b78c416150b3c99a..910e7c67aad0068679ca2d240b23312add3e42d7 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -38,9 +38,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     auto add = Add(2, (!name.empty()) ? name + "_add" : "");
 
     // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateX" : "");
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateX" : "");
     input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateH" : "");
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateH" : "");
     hiddenState->addChild(forgetGateH, 1, 0);
     auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
     forgetGateX->addChild(forgetGate, 0, 0);
@@ -53,9 +53,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     cellState->addChild(forgetGateMul, 1, 1);
 
     // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateX" : "");
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateX" : "");
     input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateH" : "");
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateH" : "");
     hiddenState->addChild(inputGateH, 1, 0);
     auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
     inputGateX->addChild(inputGate, 0, 0);
@@ -67,9 +67,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     inputGateMul->addChild(add, 0, 1);
 
     // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateX" : "");
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateX" : "");
     input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateH" : "");
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateH" : "");
     hiddenState->addChild(cellCandidateH, 1, 0);
     auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
@@ -79,9 +79,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     cellCandidateAct->addChild(inputGateMul, 0, 1);
 
     // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateX" : "");
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateX" : "");
     input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateH" : "");
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateH" : "");
     hiddenState->addChild(outputGateH, 1, 0);
     auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
     outputGateX->addChild(outputGate, 0, 0);
@@ -124,19 +124,20 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     addProducer(metaOp, 6, {hiddenChannel, hiddenChannel}, "ro");
     addProducer(metaOp, 7, {hiddenChannel, hiddenChannel}, "rf");
     addProducer(metaOp, 8, {hiddenChannel, hiddenChannel}, "rc");
-    addProducer(metaOp, 9, {(noBias ? 0 : hiddenChannel)}, "wbi");
-    addProducer(metaOp, 10, {(noBias ? 0 : hiddenChannel)}, "wbo");
-    addProducer(metaOp, 11, {(noBias ? 0 : hiddenChannel)}, "wbf");
-    addProducer(metaOp, 12, {(noBias ? 0 : hiddenChannel)}, "wbc");
-    addProducer(metaOp, 13, {(noBias ? 0 : hiddenChannel)}, "rbi");
-    addProducer(metaOp, 14, {(noBias ? 0 : hiddenChannel)}, "rbo");
-    addProducer(metaOp, 15, {(noBias ? 0 : hiddenChannel)}, "rbf");
-    addProducer(metaOp, 16, {(noBias ? 0 : hiddenChannel)}, "rbc");
+    if (!noBias) {
+        addProducer(metaOp, 9, {hiddenChannel}, "wbi");
+        addProducer(metaOp, 10, {hiddenChannel}, "wbo");
+        addProducer(metaOp, 11, {hiddenChannel}, "wbf");
+        addProducer(metaOp, 12, {hiddenChannel}, "wbc");
+        addProducer(metaOp, 13, {hiddenChannel}, "rbi");
+        addProducer(metaOp, 14, {hiddenChannel}, "rbo");
+        addProducer(metaOp, 15, {hiddenChannel}, "rbf");
+        addProducer(metaOp, 16, {hiddenChannel}, "rbc");
+    }
     return metaOp;
 }
 
-std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
-                                         bool noBias)
+std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
 {
     // Construct micro-graph
     auto input = Identity("");
@@ -145,9 +146,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     auto add = Add(2, "");
 
     // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(forgetGateH, 1, 0);
     auto forgetGate = Add(2, "");
     forgetGateX->addChild(forgetGate, 0, 0);
@@ -160,9 +161,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     cellState->addChild(forgetGateMul, 1, 1);
 
     // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(inputGateH, 1, 0);
     auto inputGate = Add(2, "");
     inputGateX->addChild(inputGate, 0, 0);
@@ -174,9 +175,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     inputGateMul->addChild(add, 0, 1);
 
     // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(cellCandidateH, 1, 0);
     auto cellCandidate = Add(2, "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
@@ -186,9 +187,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     cellCandidateAct->addChild(inputGateMul, 0, 1);
 
     // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(outputGateH, 1, 0);
     auto outputGate = Add(2,"");
     outputGateX->addChild(outputGate, 0, 0);
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
index ad300cd4f98b84d5ac5834370db53017958efaf6..ef319ef38ad18de9eaed0a1d4a92c3877ee7cf8e 100644
--- a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -46,8 +46,6 @@ std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
 template std::shared_ptr<Node> PaddedAvgPooling<2>(const std::array<DimSize_t,2>&, const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
-template std::shared_ptr<Node> PaddedAvgPooling<3>(const std::array<DimSize_t,3>&, const std::string&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
-template std::shared_ptr<Node> PaddedAvgPooling<4>(const std::array<DimSize_t,4>&, const std::string&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -61,8 +59,6 @@ std::shared_ptr<Node> PaddedAvgPooling(const DimSize_t (&kernel_dims)[DIM],
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const DimSize_t (&kernel_dims)[1], const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
 template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims)[2], const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
-template std::shared_ptr<Node> PaddedAvgPooling<3>(const DimSize_t (&kernel_dims)[3], const std::string&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
-template std::shared_ptr<Node> PaddedAvgPooling<4>(const DimSize_t (&kernel_dims)[4], const std::string&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
 
 
 //////////////////////////////////
@@ -84,8 +80,5 @@ inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<Dim
 
 template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
 template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<2>(const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
-template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<3>(const std::array<DimSize_t,3>&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
-template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<4>(const std::array<DimSize_t,4>&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
-
 
 } // namespace Aidge
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 426de388f31391fb5e59446d50e50de94ca5f8a1..ded67a11acd299e5407f0d7e74146f5bcd1bf86a 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -24,13 +24,7 @@
 const std::string Aidge::Mul_Op::Type = "Mul";
 
 bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 84d42c089baecdd78c35506a693b05a8ed728fd9..5df90020a43ad6cffebcd2345c075837f11462b1 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -9,7 +9,6 @@
  *
  ********************************************************************************/
 
-#include <cassert>
 #include <memory>
 
 #include "aidge/operator/OperatorTensor.hpp"
@@ -20,11 +19,10 @@
 
 
 Aidge::OperatorTensor::OperatorTensor(const std::string& type,
-                                                            const IOIndex_t nbData,
-                                                            const IOIndex_t nbParam,
+                                      const std::vector<InputCategory>& inputsCategory,
                                                             const IOIndex_t nbOut)
-: Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor),
-        mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)),
+: Operator(type, inputsCategory, nbOut, OperatorType::Tensor),
+        mInputs(std::vector<std::shared_ptr<Tensor>>(inputsCategory.size(), nullptr)),
         mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) {
     for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) {
         mOutputs[i] = std::make_shared<Tensor>();
@@ -51,6 +49,11 @@ void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, cons
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
 }
 
+void Aidge::OperatorTensor::resetInput(const Aidge::IOIndex_t inputIdx) {
+    AIDGE_ASSERT(inputIdx < nbInputs(), "Input idx out of range.");
+    mInputs[inputIdx] = nullptr;
+}
+
 void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     if (getInput(inputIdx)) {
@@ -98,9 +101,6 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     if (outputIdx >= nbOutputs()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
     }
-    if (nbInputs() != nbData()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
-    }
     if (!dimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }
@@ -110,19 +110,28 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
         }
     }
     // return the same Tensor description as given in function parameter for each data input
-    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
+    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbInputs(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
 }
 
-bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = (nbInputs() > 0); // do not compute anything if no input
+bool Aidge::OperatorTensor::inputsAssociated(bool checkNonEmpty) const {
+    bool associated = true;
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+            }
+        }
+
+        if (checkNonEmpty && getInput(i)) {
+            associated &= !(getInput(i)->empty());
         }
-        associated &= !(getInput(i)->empty());
     }
-    if (associated) {
+
+    return associated;
+}
+
+bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
         const auto expectedDims =  getInput(0)->dims();
         for (std::size_t i = 1; i < nbInputs(); ++i) {
             if (expectedDims != getInput(i)->dims()) {
@@ -132,16 +141,19 @@ bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
             }
         }
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 bool Aidge::OperatorTensor::dimsForwarded() const {
     bool forwarded = true;
     // check both inputs and outputs have been filled
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+        if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
+            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+        }
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         // If getOutput(i) is nullptr, ignore this output (it may be a dummy
@@ -157,9 +169,14 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
     }
 
     // Set data type for parameters inputs only (weights, bias...), which are usually Producers
-    for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
-        AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
-        getInput(i)->setDataType(dataType);
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (inputCategory(i) == InputCategory::Param) {
+            AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
+            getInput(i)->setDataType(dataType);
+        }
+        else if (inputCategory(i) == InputCategory::OptionalParam && getInput(i) != nullptr) {
+            getInput(i)->setDataType(dataType);
+        }
     }
 }
 
@@ -169,9 +186,14 @@ void Aidge::OperatorTensor::setDataFormat(const DataFormat& dataFormat) const {
     }
 
     // Set data format for parameters inputs only (weights, bias...), which are usually Producers
-    for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
-        AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
-        getInput(i)->setDataFormat(dataFormat);
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (inputCategory(i) == InputCategory::Param) {
+            AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
+            getInput(i)->setDataFormat(dataFormat);
+        }
+        else if (inputCategory(i) == InputCategory::OptionalParam && getInput(i) != nullptr) {
+            getInput(i)->setDataFormat(dataFormat);
+        }
     }
 }
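The inputsAssociated() helper introduced here is what lets most operators' forwardDims() collapse into the same shape: throw if a mandatory input is missing, wait (return false) while any present input is still empty, otherwise compute the output dims. A minimal sketch of the resulting idiom for a hypothetical single-input operator (MyUnaryOp is a placeholder, not part of this patch):

    bool Aidge::MyUnaryOp::forwardDims(bool /*allowDataDependency*/) {
        // inputsAssociated() throws if a mandatory input is missing and returns
        // false while any present input is still empty (checkNonEmpty defaults to true).
        if (inputsAssociated()) {
            mOutputs[0]->resize(getInput(0)->dims());
            return true;
        }
        return false;
    }

Operators that can compute their output before every input has dims (MatMul, Memorize, GenericOperator) call inputsAssociated(false) and perform their own emptiness checks.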
 
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c66e6c84af6df299e4786bbbb73767d6ee6374f5
--- /dev/null
+++ b/src/operator/Pad.cpp
@@ -0,0 +1,19 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Pad_Op<DIM>::Type = "Pad";
+
+template class Aidge::Pad_Op<1>;
+template class Aidge::Pad_Op<2>;
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 18325d80a94f35878ededca839ec809000527c39..2fcc46a460ffd7c7f6746dfcd108acbaafe912de 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -30,19 +30,15 @@ Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputI
 
 void Aidge::Pop_OpImpl::forward() {
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+
     assert(op.getInput(0) && "missing input #0");
-    const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>();
-    *op.getOutput(0) = op.getInput(0)->extract({forwardStep});
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
 }
 
 const std::string Aidge::Pop_Op::Type = "Pop";
 
 bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-    if (!(getInput(0)->empty())) {
+    if (inputsAssociated()) {
         auto inputDims = getInput(0)->dims();
         inputDims.erase(inputDims.begin());
         getOutput(0)->resize(inputDims);
@@ -54,7 +50,7 @@ bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
 
 void Aidge::Pop_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    this->template getAttr<PopAttr::ForwardStep>() = 0;
+    mAttributes->template getAttr<PopAttr::ForwardStep>() = 0;
 }
 
 void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -69,5 +65,5 @@ void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
 
 void Aidge::Pop_Op::forward() {
     Operator::forward();
-    ++this->template getAttr<PopAttr::ForwardStep>();
+    ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 135c792345b0caf1166e671a8dad7d5b49b42ee7..2a50f9c7bad1e40cd6e69cfc0a22632439cfe000 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -23,13 +23,7 @@
 const std::string Aidge::Pow_Op::Type = "Pow";
 
 bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 7059ea7e989d789b4cff0ed895fc2c5ec0ad81bc..bdb69452ec54fb635d0cbc299336071295f37ae1 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -28,8 +28,9 @@ const std::string Aidge::Producer_Op::Type = "Producer";
 
 
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
-    : OperatorTensor(Type, 0, 0, 1),
-      Attributes_(attr<ProdAttr::Constant>(constant))
+    : OperatorTensor(Type, {}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<ProdAttr::Constant>(constant)))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
@@ -47,7 +48,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
  */
 Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
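Attributes are no longer mixed in through Attributes_ inheritance: each operator now owns a shared_ptr to its StaticAttributes, copy constructors share that pointer instead of copying the values, and named getters replace getAttr<...>() at call sites. A minimal sketch of the convention, in which every name except mAttributes is illustrative and header paths are assumed:

    #include <cstdint>
    #include <memory>
    #include "aidge/operator/OperatorTensor.hpp"
    #include "aidge/utils/StaticAttributes.hpp"

    // Hypothetical operator following the new attribute-storage convention.
    // (Virtual overrides required by Operator, such as setBackend(), omitted for brevity.)
    enum class MyAttr { Step };

    class My_Op : public Aidge::OperatorTensor {
    public:
        using Attributes_ = Aidge::StaticAttributes<MyAttr, std::uint32_t>;

        My_Op(std::uint32_t step)
            : OperatorTensor("My", {Aidge::InputCategory::Data}, 1),
              mAttributes(std::make_shared<Attributes_>(Aidge::attr<MyAttr::Step>(step))) {}

        // Copies share the same attribute object instead of duplicating it.
        My_Op(const My_Op& op) : OperatorTensor(op), mAttributes(op.mAttributes) {}

        // Named getter replacing getAttr<MyAttr::Step>() at call sites.
        inline std::uint32_t& step() const { return mAttributes->getAttr<MyAttr::Step>(); }

    private:
        const std::shared_ptr<Attributes_> mAttributes;
    };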
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 28e39b6d3387a0371c0505dc0a7b350e83a2bbaf..96f2f855f46275e167acb1300434f8bcdbdd7d3e 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -27,12 +27,9 @@
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
 bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
-    }
-    if (!getInput(0)->empty()) {
+    if (inputsAssociated()) {
         // make Axes attribute positive
-        std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceMeanAttr::Axes>();
         std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
             if (val < 0)
                 val+=static_cast<std::int32_t>(getInput(0)->nbDims());
@@ -41,7 +38,7 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
-        if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+        if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
             std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
         else {
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index adbd5fae8a11bfc5009ed4b920d28624db71bb0d..1838c008a6b83548b6a5a80af0363e2cf239b649 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -40,68 +40,65 @@ bool Aidge::Reshape_Op::dimsForwarded() const {
 }
 
 bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
-    // check input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Shape
+        if (getInput(1)) {
+            if (!this->shape().empty()) {
+                Log::notice("Reshape_Op: ignoring non-empty Shape attribute because input#1 takes precedence");
+            }
 
-    if (getInput(1) && !getInput(1)->empty()) {
-        if (!this->template getAttr<ReshapeAttr::Shape>().empty()) {
-            Log::notice("Reshape_Op: ignoring non-empty Shape attribute because input#1 takes precedence");
-        }
+            if (!allowDataDependency) {
+                Log::warn("Reshape_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
 
-        if (!allowDataDependency) {
-            Log::warn("Reshape_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
+            std::shared_ptr<Tensor> fallback;
+            this->shape().clear(); // If both are provided, the input overrides the attribute
+            this->shape().reserve(getInput(1)->size());
+            const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
+                        shape.size(),
+                        std::back_inserter(this->shape()));
         }
 
-        std::shared_ptr<Tensor> fallback;
-        this->template getAttr<ReshapeAttr::Shape>().clear(); // If both are provided input would override attrs
-        this->template getAttr<ReshapeAttr::Shape>().reserve(getInput(1)->size());
-        const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
-                    shape.size(),
-                    std::back_inserter(this->template getAttr<ReshapeAttr::Shape>()));
-    }
+        AIDGE_ASSERT(!this->shape().empty(), "Missing input#1 or Shape attribute");
 
-    AIDGE_ASSERT(!this->template getAttr<ReshapeAttr::Shape>().empty(), "Missing input#1 or Shape attribute");
-
-    std::vector<DimSize_t> outDims;
-    // variables to handle a negative dimension
-    bool foundNegativeDimension = false;
-    std::size_t outSize = 1;
-    DimIdx_t negativeIndex = 0;
-    for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
-    {
-        int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
-        if (dimSize < 0) {
-            if (foundNegativeDimension) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
-            }
-            foundNegativeDimension = true;
-            dimSize = 1;
-            negativeIndex = static_cast<DimIdx_t>(i);
-        }
-        else if (dimSize == 0 && !this->template getAttr<ReshapeAttr::AllowZero>())
+        // Compute output dims
+        std::vector<DimSize_t> outDims;
+        // variables to handle a negative dimension
+        bool foundNegativeDimension = false;
+        std::size_t outSize = 1;
+        DimIdx_t negativeIndex = 0;
+        for(std::size_t i = 0; i < this->shape().size(); ++i)
         {
-            dimSize = getInput(0) -> dims()[i];
+            int64_t dimSize = this->shape()[i];
+            if (dimSize < 0) {
+                if (foundNegativeDimension) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
+                }
+                foundNegativeDimension = true;
+                dimSize = 1;
+                negativeIndex = static_cast<DimIdx_t>(i);
+            }
+            else if (dimSize == 0 && !mAttributes->template getAttr<ReshapeAttr::AllowZero>())
+            {
+                dimSize = getInput(0) -> dims()[i];
+            }
+            outDims.push_back(static_cast<DimSize_t>(dimSize));
+            if (dimSize != 0) {
+                outSize *= static_cast<DimSize_t>(dimSize);
+            }
         }
-        outDims.push_back(static_cast<DimSize_t>(dimSize));
-        if (dimSize != 0) {
-            outSize *= static_cast<DimSize_t>(dimSize);
+
+        if (foundNegativeDimension) {
+            outDims[negativeIndex] = (getInput(0) -> size()) / outSize;
         }
-    }
 
-    if (foundNegativeDimension) {
-        outDims[negativeIndex] = (getInput(0) -> size()) / outSize;
+        mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    mOutputs[0]->resize(outDims);
-    return true;
+    return false;
 }
 
 void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
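A worked example of the Reshape resolution above (values chosen for illustration, not taken from the patch): with an input of dims {2, 3, 4} (24 elements), Shape = {0, -1} and AllowZero left false, the loop first replaces the 0 with the input's dim 0 (giving 2, outSize = 2), then records the -1 as the single allowed negative dimension with a provisional size of 1; after the loop, that dimension is set to input size / outSize = 24 / 2 = 12, so the output is resized to {2, 12}.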
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..966e1c3e032e64e75d3606fca022b84f9da8fbaf
--- /dev/null
+++ b/src/operator/Resize.cpp
@@ -0,0 +1,121 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Resize.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+#include <fmt/core.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Resize_Op::Type = "Resize";
+
+bool Aidge::Resize_Op::dimsForwarded() const {
+    // Output dims are data-dependent when ROI (input #1), Scales (input #2) or Sizes (input #3) are provided as inputs
+    if ((getInput(1) && !getInput(1)->empty())
+        || (getInput(2) && !getInput(2)->empty())
+        || (getInput(3) && !getInput(3)->empty())
+        )
+    {
+        // output dims are data dependent
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->nbDims() == 4,
+            "input tensor must have 4 dimensions (batch, channel, height, width).");
+
+        const bool input1ROIPresent           = getInput(1) && !getInput(1)->empty();
+        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->empty();
+        const bool input3SizesPresent         = getInput(3) && !getInput(3)->empty();
+
+        AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of Scales and Sizes can be specified.");
+
+        if (input1ROIPresent) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is given and it is not supported.");
+        }
+        else if (input2ScalesPresent)  {
+            if (!allowDataDependency) {
+                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #2");
+                return false;
+            }
+
+            AIDGE_ASSERT(getInput(0)->nbDims() == getInput(2)->size(),
+                "input #2 (Scales) must provide one value per dimension of input #0.");
+
+            std::vector<DimSize_t>      outDims = getInput(0)->dims();
+            const std::vector<DimSize_t> inDims = getInput(0)->dims();
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+
+            for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
+                outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
+            }
+
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        else if (input3SizesPresent) {
+            if (!allowDataDependency) {
+                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #3");
+                return false;
+            }
+
+            AIDGE_ASSERT(getInput(0)->nbDims() == getInput(3)->size(),
+                "input #3 (Sizes) must provide one value per dimension of input #0.");
+
+            std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+
+            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
+                outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
+            }
+
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: Either Input #2 or Input #3 must be present.");
+        }
+    }
+
+    return false;
+}
+
+void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Resize_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for all inputs: roi, scales and sizes
+    if(getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    if(getInput(2)) {
+        getInput(2)->setBackend(name, device);
+    }
+    if(getInput(3)) {
+        getInput(3)->setBackend(name, device);
+    }
+}
\ No newline at end of file
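
Resize_Op::forwardDims() above derives the output shape either from per-axis Scales (input #2) or from explicit Sizes (input #3), never both. A compact illustration of those two branches using plain vectors instead of Tensors (note that the patch itself reads the scales buffer as int64_t; floats are used here only for readability):

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Illustrative only: mirrors the Scales/Sizes branches above.
    std::vector<std::size_t> resizeOutputDims(const std::vector<std::size_t>& inDims,
                                              const std::vector<float>& scales,        // empty if unused
                                              const std::vector<std::int64_t>& sizes)  // empty if unused
    {
        if (scales.empty() == sizes.empty())
            throw std::runtime_error("exactly one of Scales and Sizes must be given");
        std::vector<std::size_t> outDims(inDims.size());
        for (std::size_t dim = 0; dim < inDims.size(); ++dim) {
            outDims[dim] = scales.empty()
                ? static_cast<std::size_t>(sizes[dim])
                : static_cast<std::size_t>(inDims[dim] * scales[dim]);
        }
        return outDims;
    }

    // e.g. resizeOutputDims({1, 3, 32, 32}, {1.f, 1.f, 2.f, 2.f}, {}) == {1, 3, 64, 64}
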
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index d11cf39e1cd301d49f21863dcb1f250e96c6e502..8166712e1e5fd967bb9328e95ecf8c5388636ba7 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -21,10 +21,10 @@
 
 void Aidge::Shape_OpImpl::forward() {
     const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
-    const auto start = op.template getAttr<std::int64_t>("Start");
-    const auto end = op.template getAttr<std::int64_t>("End");
+    const auto start = op.start();
+    const auto end = op.end();
 
-    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(), 
+    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(),
                                                    start),
                                          DataType::UInt64,
                                          end - start + 1);
@@ -33,30 +33,25 @@ void Aidge::Shape_OpImpl::forward() {
 const std::string Aidge::Shape_Op::Type = "Shape";
 
 bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check data input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
+    if (inputsAssociated()) {
+        if (mAttributes->template getAttr<std::int64_t>("Start") < 0)
+            mAttributes->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
+        if (mAttributes->template getAttr<std::int64_t>("End") < 0)
+            mAttributes->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
 
-    if (this->template getAttr<std::int64_t>("Start") < 0)
-        this->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
-    if (this->template getAttr<std::int64_t>("End") < 0)
-        this->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
+        const auto start = mAttributes->template getAttr<std::int64_t>("Start");
+        const auto end = mAttributes->template getAttr<std::int64_t>("End");
+        const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
+        const DimSize_t roi = end - start + 1;
 
-    const auto start = this->template getAttr<std::int64_t>("Start");
-    const auto end = this->template getAttr<std::int64_t>("End");
-    const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
-    const DimSize_t roi = end - start + 1;
+        AIDGE_ASSERT(start < nbDims && end < nbDims, "'Start' and 'End' must be < {}", nbDims);
+        AIDGE_ASSERT(roi > 1, "Invalid ROI for Shape");
 
-    AIDGE_ASSERT(start < nbDims && end < nbDims, "'Start' and 'End' must be < {}", nbDims);
-    AIDGE_ASSERT(roi> 1, "Unvalid ROI for Shape");
+        mOutputs[0]->resize({roi});
+        return true;
+    }
 
-    mOutputs[0]->resize({roi});
-    return true;
+    return false;
 }
 
 void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
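
Shape_Op now folds negative Start/End attributes back into the valid range before producing a 1-D tensor of length end - start + 1. The normalisation amounts to:

    #include <cstdint>
    #include <utility>

    // Hypothetical helper mirroring Shape_Op::forwardDims(): maps possibly
    // negative Start/End onto [0, nbDims) and returns {start, roi length}.
    std::pair<std::int64_t, std::int64_t> normaliseShapeRange(std::int64_t start,
                                                              std::int64_t end,
                                                              std::int64_t nbDims) {
        if (start < 0) start += nbDims;
        if (end < 0)   end   += nbDims;
        return {start, end - start + 1};
    }

    // e.g. with a 4-D input, Start = 1 and End = -1 give start = 1 and roi = 3
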
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index aca13b94cb46576d515a6f12c436431d49e0652b..3cc2de686435a304326e2a4a60dad6c12a50349c 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -42,134 +42,135 @@ bool Aidge::Slice_Op::dimsForwarded() const {
 }
 
 bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
+    if (inputsAssociated()) {
+        std::shared_ptr<Tensor> fallback;
+        // Copy optional input #1, if present, to attribute Starts
+        if (getInput(1)) {
+            if (!this->starts().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Starts attribute because input#1 takes precedence");
+            }
 
-   std::shared_ptr<Tensor> fallback;
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
 
-    if (getInput(1) && !getInput(1)->empty()) {
-        if (!this->template getAttr<SliceAttr::Starts>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Starts attribute because input#1 takes precedence");
+            this->starts().clear(); // If both are provided input would override attrs
+            this->starts().reserve(getInput(1)->size());
+            const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
+                        starts.size(),
+                        std::back_inserter(this->starts()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
-        }
+        AIDGE_ASSERT(!this->starts().empty(), "Missing input#1 or Starts attribute");
 
-        this->template getAttr<SliceAttr::Starts>().clear(); // If both are provided input would override attrs
-        this->template getAttr<SliceAttr::Starts>().reserve(getInput(1)->size());
-        const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
-                    starts.size(),
-                    std::back_inserter(this->template getAttr<SliceAttr::Starts>()));
-    }
+        // Copy optional input #2, if present, to attribute Ends
+        if (getInput(2)) {
+            if (!this->ends().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Ends attribute because input#2 takes precedence");
+            }
 
-    AIDGE_ASSERT(!this->template getAttr<SliceAttr::Starts>().empty(), "Missing input#1 or Starts attribute");
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#2");
+                return false;
+            }
 
-    if (getInput(2) && !getInput(2)->empty()) {
-        if (!this->template getAttr<SliceAttr::Ends>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Ends attribute because input#2 takes precedence");
+            this->ends().clear(); // If both are provided input would override attrs
+            this->ends().reserve(getInput(2)->size());
+            const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
+                        ends.size(),
+                        std::back_inserter(this->ends()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#2");
-            return false;
-        }
+        AIDGE_ASSERT(!this->ends().empty(), "Missing input#2 or Ends attribute");
 
-        this->template getAttr<SliceAttr::Ends>().clear(); // If both are provided input would override attrs
-        this->template getAttr<SliceAttr::Ends>().reserve(getInput(2)->size());
-        const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
-                    ends.size(),
-                    std::back_inserter(this->template getAttr<SliceAttr::Ends>()));
-    }
+        // Copy optional input #3, if present, to attribute Axes
+        if (getInput(3)) {
+            if (!this->axes().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Axes attribute because input#3 takes precedence");
+            }
 
-    AIDGE_ASSERT(!this->template getAttr<SliceAttr::Ends>().empty(), "Missing input#2 or Ends attribute");
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#3");
+                return false;
+            }
 
-    if (getInput(3) && !getInput(3)->empty()) {
-        if (!this->template getAttr<SliceAttr::Axes>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Axes attribute because input#3 takes precedence");
+            this->axes().clear(); // If both are provided input would override attrs
+            this->axes().reserve(getInput(3)->size());
+            const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+            std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
+                        axes.size(),
+                        std::back_inserter(this->axes()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#3");
-            return false;
-        }
+        AIDGE_ASSERT(!this->axes().empty(), "Missing input#3 or Axes attribute");
 
-        this->template getAttr<SliceAttr::Axes>().clear(); // If both are provided input would override attrs
-        this->template getAttr<SliceAttr::Axes>().reserve(getInput(3)->size());
-        const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
-        std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
-                    axes.size(),
-                    std::back_inserter(this->template getAttr<SliceAttr::Axes>()));
-    }
+        // Copy optional input #4, if present, to attribute Steps
+        if (getInput(4)) {
+            if (!this->steps().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Steps attribute because input#4 takes precedence");
+            }
 
-    AIDGE_ASSERT(!this->template getAttr<SliceAttr::Axes>().empty(), "Missing input#3 or Axes attribute");
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#4");
+                return false;
+            }
 
-    if (getInput(4) && !getInput(4)->empty()) {
-        if (!this->template getAttr<SliceAttr::Steps>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Steps attribute because input#4 takes precedence");
+            this->steps().clear(); // If both are provided input would override attrs
+            this->steps().reserve(getInput(4)->size());
+            const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
+                        steps.size(),
+                        std::back_inserter(this->steps()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#4");
-            return false;
+        // Fill Steps attr if empty
+        if(this->steps().empty()) {
+            // In case the input Steps is not provided, default value is 1
+            this->steps() = std::vector<std::int64_t>(this->axes().size(), 1);
         }
 
-        this->template getAttr<SliceAttr::Steps>().clear(); // If both are provided input would override attrs
-        this->template getAttr<SliceAttr::Steps>().reserve(getInput(4)->size());
-        const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
-                    steps.size(),
-                    std::back_inserter(this->template getAttr<SliceAttr::Steps>()));
-    }
-    // Fill Steps attr if empty
-    if(this->template getAttr<SliceAttr::Steps>().empty()) {
-        // In case the input Steps is not provided, default value is 1
-        this->template getAttr<SliceAttr::Steps>() = std::vector<std::int64_t>(this->template getAttr<SliceAttr::Axes>().size(), 1);
-    }
-
-    const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    for (std::size_t i = 0; i < nbAxes; ++i) {
-        const DimIdx_t axis = this->template getAttr<SliceAttr::Axes>()[i] >= 0 ?
-                        static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i]) :
-                        static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
-        const DimSize_t start = this->template getAttr<SliceAttr::Starts>()[i] >= 0 ?
-                            static_cast<DimSize_t>(this->template getAttr<SliceAttr::Starts>()[i]) :
-                            static_cast<DimSize_t>(this->template getAttr<SliceAttr::Starts>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
-        const DimSize_t end = this->template getAttr<SliceAttr::Ends>()[i] >= 0 ?
-                        static_cast<DimSize_t>(this->template getAttr<SliceAttr::Ends>()[i]) :
-                        static_cast<DimSize_t>(this->template getAttr<SliceAttr::Ends>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
-        const std::int64_t step = this->template getAttr<SliceAttr::Steps>()[i];
-
-        AIDGE_ASSERT(step != 0, "Slice_Op: Step must be a non-zero value!");
-        if(step * (static_cast<int64_t>(end) - static_cast<int64_t>(start)) < 0) {
-            if(step < 0) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is negative we must have End < Start", type());
-            }
-            else {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is positive we must have Start < End", type());
+        // Compute output dims
+        const DimSize_t nbAxes = this->axes().size();
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        for (std::size_t i = 0; i < nbAxes; ++i) {
+            const DimIdx_t axis = this->axes()[i] >= 0 ?
+                            static_cast<DimIdx_t>(this->axes()[i]) :
+                            static_cast<DimIdx_t>(this->axes()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
+            const DimSize_t start = this->starts()[i] >= 0 ?
+                                static_cast<DimSize_t>(this->starts()[i]) :
+                                static_cast<DimSize_t>(this->starts()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const DimSize_t end = this->ends()[i] >= 0 ?
+                            static_cast<DimSize_t>(this->ends()[i]) :
+                            static_cast<DimSize_t>(this->ends()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const std::int64_t step = this->steps()[i];
+
+            AIDGE_ASSERT(step != 0, "Slice_Op: Step ({}) must have a non-zero value on axis {}!", this->steps(), axis);
+            if(step * (static_cast<int64_t>(end) - static_cast<int64_t>(start)) < 0) {
+                if(step < 0) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step ({}) is negative, we must have End ({}) < Start ({}) on axis {}", type(), step, end, start, axis);
+                }
+                else {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step ({}) is positive, we must have Start ({}) < End ({}) on axis {}", type(), step, start, end, axis);
+                }
             }
-        }
 
-        const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>(step)));
-        // Check if slice length is valid
-        if (sliceLength > getInput(0)->dims()[axis])
-        {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice_Op: ROI of Slice operator out of bounds");
+            const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>(step)));
+            // Check if slice length is valid
+            if (sliceLength > getInput(0)->dims()[axis])
+            {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice_Op: ROI ({}) of Slice operator out of bounds ({}) on axis {}, with (Start, End, Step) = ({}, {}, {})",
+                    sliceLength, getInput(0)->dims()[axis], axis, start, end, step);
+            }
+            outDims[axis] = sliceLength;
         }
-        outDims[axis] = sliceLength;
+        mOutputs[0]->resize(outDims);
+        return true;
     }
-    mOutputs[0]->resize(outDims);
-    return true;
+
+    return false;
 }
 
 void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
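
In the reorganised Slice_Op::forwardDims(), each optional input (Starts, Ends, Axes, Steps) overrides its attribute, negative indices are wrapped against the axis extent, and the output length per axis is ceil((end - start) / step). A compact sketch of that per-axis computation (names chosen here, not Aidge API):

    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>

    // Illustration of the per-axis slice length checked above; dimSize is the
    // input extent on the sliced axis.
    std::size_t sliceLength(std::int64_t start, std::int64_t end,
                            std::int64_t step, std::size_t dimSize) {
        if (step == 0) throw std::runtime_error("step must be non-zero");
        if (start < 0) start += static_cast<std::int64_t>(dimSize);
        if (end < 0)   end   += static_cast<std::int64_t>(dimSize);
        if (step * (end - start) < 0)
            throw std::runtime_error("start/end inconsistent with the sign of step");
        const auto length = static_cast<std::size_t>(
            std::ceil(static_cast<float>(end - start) / static_cast<float>(step)));
        if (length > dimSize) throw std::runtime_error("slice out of bounds");
        return length;
    }

    // e.g. sliceLength(0, 10, 2, 10) == 5 and sliceLength(-3, 10, 1, 10) == 3
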
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 5d0493ea4da0b80bf572a33fa4ee466804d0d270..a0cb049b19e9411daf65bbe2a10319c62b32c1b8 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -65,66 +65,62 @@ bool Aidge::Split_Op::dimsForwarded() const {
 }
 
 bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
-
-    std::shared_ptr<Tensor> fallback;
-
-    if (getInput(1) && !getInput(1)->empty()) { // Split is given, replace
-        if (!this->template getAttr<SplitAttr::Split>().empty()) {
-            Log::notice("Split_Op: ignoring non-empty Split attribute because input#1 takes precedence");
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Split
+        if (getInput(1)) {
+            if (!this->template getAttr<SplitAttr::Split>().empty()) {
+                Log::notice("Split_Op: ignoring non-empty Split attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Split_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            this->template getAttr<SplitAttr::Split>().reserve(getInput(1)->size());
+            const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
+            std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
+                        splits.size(),
+                        std::back_inserter(this->template getAttr<SplitAttr::Split>()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Split_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
-        }
+        // Compute output dims
+        if (this->template getAttr<std::int8_t>("Axis") < 0)
+            this->template getAttr<std::int8_t>("Axis") += static_cast<std::int8_t>(getInput(0)->nbDims());
 
-        this->template getAttr<SplitAttr::Split>().reserve(getInput(1)->size());
-        const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
-        std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
-                    splits.size(),
-                    std::back_inserter(this->template getAttr<SplitAttr::Split>()));
-    }
+        DimSize_t dimToSplit = getInput(0)->dims()[this->template getAttr<std::int8_t>("Axis")];
+        DimSize_t nbOutput = this->nbOutputs();
+        // Fill Split attr if empty
+        if(this->template getAttr<SplitAttr::Split>().empty()) {
+            // In case the input Split is not provided, divide the dimension of Axis into equal slices
+            AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: the number of outputs {} must not exceed the size of the split dimension {}.", nbOutput, dimToSplit);
+            DimSize_t baseSliceSize = dimToSplit / nbOutput;
 
-    if (this->template getAttr<std::int8_t>("Axis") < 0)
-        this->template getAttr<std::int8_t>("Axis") += static_cast<std::int8_t>(getInput(0)->nbDims());
+            DimSize_t remainder = dimToSplit % nbOutput;
 
-    DimSize_t dimToSplit = getInput(0)->dims()[this->template getAttr<std::int8_t>("Axis")];
-    DimSize_t nbOutput = this->nbOutputs();
-    // Fill Split attr if empty
-    if(this->template getAttr<SplitAttr::Split>().empty()) {
-        // In case the input Split is not provided, divide the dimension of Axis into equal slices
-        AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} musn't be bigger than dimension {}.", nbOutput, dimToSplit);
-        DimSize_t baseSliceSize = dimToSplit / nbOutput;
+            for (DimSize_t i = 0; i < static_cast<DimSize_t>(nbOutput -1); ++i) {
+                    this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize);
+            }
+            this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize + remainder);
+        }
 
-        DimSize_t remainder = dimToSplit % nbOutput;
+        const auto splits = this->template getAttr<SplitAttr::Split>();
+        AIDGE_ASSERT(splits.size() == nbOutput, "Split_Op: number of slices {} must be equal to number of outputs {}", splits, nbOutput);
+        DimSize_t totalSplitSize = std::accumulate(splits.cbegin(), splits.cend(), 0);
+        AIDGE_ASSERT(totalSplitSize == dimToSplit, "Split_Op: Total chunks size {} is different from dimension size {}.", totalSplitSize, dimToSplit);
 
-        for (DimSize_t i = 0; i < static_cast<DimSize_t>(nbOutput -1); ++i) {
-                this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize);
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        for (std::size_t i = 0; i < nbOutput; ++i)
+        {
+            outDims[this->template getAttr<std::int8_t>("Axis")] = this->template getAttr<SplitAttr::Split>()[i];
+            mOutputs[i]->resize(outDims);
         }
-        this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize + remainder);
-    }
-
-    const auto splits = this->template getAttr<SplitAttr::Split>();
-    AIDGE_ASSERT(splits.size() == nbOutput, "Split_Op: number of slices {} must be equal to number of outputs {}", splits, nbOutput);
-    DimSize_t totalSplitSize = std::accumulate(splits.cbegin(), splits.cend(), 0);
-    AIDGE_ASSERT(totalSplitSize == dimToSplit, "Split_Op: Total chunks size {} is different from dimension size {}.", totalSplitSize, dimToSplit);
 
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    for (std::size_t i = 0; i < nbOutput; ++i)
-    {
-        outDims[this->template getAttr<std::int8_t>("Axis")] = this->template getAttr<SplitAttr::Split>()[i];
-        mOutputs[i]->resize(outDims);
+        return true;
     }
-
-    return true;
+
+    return false;
 }
 
 void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
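
When no Split sizes are supplied, Split_Op divides the dimension along Axis into nbOutputs near-equal chunks and folds the remainder into the last one, as the loop above shows. For example:

    #include <cstddef>
    #include <vector>

    // Illustrative default-split computation: each output gets
    // dimToSplit / nbOutputs elements, the last one also takes the remainder.
    std::vector<std::size_t> defaultSplit(std::size_t dimToSplit, std::size_t nbOutputs) {
        std::vector<std::size_t> split(nbOutputs, dimToSplit / nbOutputs);
        split.back() += dimToSplit % nbOutputs;
        return split;
    }

    // e.g. defaultSplit(10, 3) == {3, 3, 4}
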
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index b977f4ee7ccce32d7f7929cbee99140aea36cd2f..858b32beaf9e23e8e9e7f52cfe7176afe399843c 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -25,13 +25,7 @@
 const std::string Aidge::Sub_Op::Type = "Sub";
 
 bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
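
Across this patch (Reshape, Shape, Slice, Split, Sub, Transpose), the hand-rolled input checks are replaced by a single inputsAssociated() call. Its implementation is not part of this diff; a hedged sketch of what such a helper presumably verifies, using stand-in types:

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Sketch only: assumed behaviour of OperatorTensor::inputsAssociated().
    // It presumably returns true once every mandatory input has an associated,
    // non-empty tensor, so forwardDims() can simply return false otherwise.
    struct StubTensor {
        std::vector<std::size_t> dims;
        bool empty() const { return dims.empty(); }
    };

    bool inputsAssociatedSketch(const std::vector<std::shared_ptr<StubTensor>>& inputs) {
        for (const auto& input : inputs) {
            if (!input || input->empty()) {  // optional inputs would be skipped in the real helper
                return false;
            }
        }
        return true;
    }
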
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 7b20366576b16868af20947a2248ae3e2df85650..69820a924105acc8bea817aecb90e0aa278fce06 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -25,22 +25,16 @@
 
 void Aidge::TransposeImpl::forward() {
     const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
-    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.getAttr<std::vector<DimSize_t>>(0));
+    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
 }
 
 const std::string Aidge::Transpose_Op::Type = "Transpose";
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
-    }
-
-    if (!getInput(0)->empty()) {
-        const auto& outDimsOrder = getAttr<std::vector<DimSize_t>>(0);
+    if (inputsAssociated()) {
         std::vector<DimSize_t> outputDims;
-        for (std::size_t i = 0; i < outDimsOrder.size(); ++i) {
-            outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
+        for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
+            outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
         }
         mOutputs[0]->resize(outputDims);
         return true;
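
Transpose now exposes its permutation through outputDimsOrder() and, as above, builds the output shape by reordering the input dims:

    #include <cstddef>
    #include <vector>

    // Illustration of the permutation applied above: outDims[i] = inDims[order[i]].
    std::vector<std::size_t> permuteDims(const std::vector<std::size_t>& inDims,
                                         const std::vector<std::size_t>& order) {
        std::vector<std::size_t> outDims;
        outDims.reserve(order.size());
        for (std::size_t i = 0; i < order.size(); ++i) {
            outDims.push_back(inDims[order[i]]);
        }
        return outDims;
    }

    // e.g. permuteDims({1, 3, 224, 224}, {0, 2, 3, 1}) == {1, 224, 224, 3} (NCHW -> NHWC)
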
diff --git a/src/recipes/ConstantFolding.cpp b/src/recipes/ConstantFolding.cpp
index 42fb45224614ca2655165a69b974cfe229e27f90..40b0bda766ab243805349b13e93391c5a60df63a 100644
--- a/src/recipes/ConstantFolding.cpp
+++ b/src/recipes/ConstantFolding.cpp
@@ -44,7 +44,7 @@ void Aidge::constantFolding(std::shared_ptr<GraphView> graph) {
                     }
 
                     const auto& producer = std::static_pointer_cast<Producer_Op>(input.first->getOperator());
-                    if (!producer->getAttr<bool>("Constant")) {
+                    if (!producer->constant()) {
                         Log::info("Node {} (of type {}) not foldable because Producer input {} not Constant",
                             node->name(), node->type(), input.first->name());
                         foldable = false;
diff --git a/src/recipes/ExplicitCastMove.cpp b/src/recipes/ExplicitCastMove.cpp
index 7d836c3acc835c5ed3fe014db6787029dc318afd..c860b9e8a0e1fcbf467eb13e1366f371d731a47d 100644
--- a/src/recipes/ExplicitCastMove.cpp
+++ b/src/recipes/ExplicitCastMove.cpp
@@ -73,7 +73,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
 
         IOIndex_t inputIdx = 0;
         for (auto parent : node->inputs()) {
-            // TODO: possible optimization: currently, a Cast/Move Operator may 
+            // TODO: possible optimization: currently, a Cast/Move Operator may
             // be added several time to the same output, if it has multiple childs,
             // even if it is the same conversion each time.
             if (parent.first != nullptr) {
@@ -91,8 +91,8 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
 
                 if (node->type() != Cast_Op::Type && input->dataType() != output->dataType()) {
                     // Change of date type => a Cast operator is required
-                    castOp = Cast();
-                    castOp->getOperator()->setDataType(output->dataType());
+                    castOp = Cast(output->dataType());
+                    // castOp->getOperator()->setDataType(output->dataType());
                     castOp->getOperator()->setBackend(device.first, device.second);
 
                     if (moveOp == nullptr) {
diff --git a/src/recipes/ExplicitTranspose.cpp b/src/recipes/ExplicitTranspose.cpp
index a12e76e9ddb701a370bfd29b70ad0775eee55962..7ff971b7e436219d5dfbb7cbadbaf780d3f1aeda 100644
--- a/src/recipes/ExplicitTranspose.cpp
+++ b/src/recipes/ExplicitTranspose.cpp
@@ -57,7 +57,7 @@ void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {
 
         IOIndex_t inputIdx = 0;
         for (auto parent : node->inputs()) {
-            // TODO: possible optimization: currently, a Transpose Operator may 
+            // TODO: possible optimization: currently, a Transpose Operator may
             // be added several time to the same output, if it has multiple childs,
             // even if it is the same conversion each time.
             if (parent.first != nullptr) {
@@ -97,7 +97,7 @@ void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {
                             const auto transpose = getDataFormatTranspose(parentInput->dataFormat(), output->dataFormat());
                             auto transposeOp = std::static_pointer_cast<Transpose_Op>(parent.first->getOperator());
                             transposeOp->setDataFormat(output->dataFormat());
-                            transposeOp->getAttr<std::vector<DimSize_t>>(0) = std::vector<DimSize_t>(transpose.begin(), transpose.end());
+                            transposeOp->outputDimsOrder() = std::vector<DimSize_t>(transpose.begin(), transpose.end());
                         }
                     }
                     else {
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 7c8c9c2ba2119e0dc708ef4b788690eb223ea0b3..aa20a056ad789975c5b4d493a1ce48dcd7592946 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -62,13 +62,13 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->outChannels();
         channelsSize = convOpPtr->inChannels();
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        kernelDims = convOpPtr->kernelDims();
     }
     else if (convNode->type() == ConvDepthWise_Op<2>::Type) {
         const std::shared_ptr<ConvDepthWise_Op<2>> convOpPtr =
             std::static_pointer_cast<ConvDepthWise_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->nbChannels();
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        kernelDims = convOpPtr->kernelDims();
     }
     AIDGE_ASSERT(kernelDims.size() == 2, "fuseBatchNorm(): only 2D convolutions are supported");
 
@@ -78,7 +78,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     const Tensor& b_mean = batchOp->getInput(3)->refCastFrom(b_meanBuf, DataType::Float32, "cpu");
     const Tensor& b_var = batchOp->getInput(4)->refCastFrom(b_varBuf, DataType::Float32, "cpu");
 
-    const float epsilon = batchOp->getAttr<float>("Epsilon");
+    const float epsilon = batchOp->epsilon();
 
 
     assert(epsilon > 0.0);
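
This hunk only swaps raw attribute access for the typed getters (kernelDims(), epsilon()); the fusion arithmetic itself is outside the diff. As a reminder, folding a BatchNorm into the preceding convolution typically rescales each output channel by gamma / sqrt(var + epsilon); a hedged sketch of that factor follows (variable names are assumptions, not the Aidge implementation):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Per-output-channel scale commonly used when folding BatchNorm into a Conv:
    // w' = w * s and b' = (b - mean) * s + beta, with s = gamma / sqrt(var + eps).
    std::vector<float> bnFoldFactors(const std::vector<float>& gamma,
                                     const std::vector<float>& variance,
                                     float epsilon) {
        std::vector<float> s(gamma.size());
        for (std::size_t c = 0; c < gamma.size(); ++c) {
            s[c] = gamma[c] / std::sqrt(variance[c] + epsilon);
        }
        return s;
    }
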
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
index 08e817c8ef4272f24e53a7870ead2c22ad46c186..6112fc47ece6bb361ebad626be7b5a6b1c2189bd 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -96,7 +96,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
         fcName += "_" + addNode->name();
     }
 
-    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias ? false : true), fcName);
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(), fcName);
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index b0c99bffb895dc64b20d76991911ae5f4b604c85..9522c0fe7346e78875a08d3ebf19a04dea2909e1 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -44,14 +44,3 @@ std::set<std::shared_ptr<Aidge::Tensor>> Aidge::parameters(std::shared_ptr<Aidge
     }
     return res;
 }
-
-void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) {
-    for (const auto& node : gv->getNodes()) {
-        // TODO: check that each node is an OperatorTensor
-        AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type());
-        const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator());
-        for (std::size_t o = 0; o < node -> nbOutputs(); ++o) {
-            op->getOutput(o)->initGrad();
-        }
-    }
-}
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index 9897549304ee04e8512ab7b4ed9450169c7fc911..88691c26d5d7013874c13000535ec2a3842d47d3 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -74,10 +74,12 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     // }
 
     std::vector<std::shared_ptr<Node>> clonedInputs = std::vector<std::shared_ptr<Node>>(node->nbInputs(), nullptr);
-    for (std::size_t i = node->nbData(); i < node ->nbInputs(); ++i) {
-        clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
-        clonedInputs[i] -> setName(node -> getParent(i) -> name() + "_0");
-        tiledOperator.insert(clonedInputs[i]);
+    for (std::size_t i = 0; i < node ->nbInputs(); ++i) {
+        if (node->inputCategory(i) == InputCategory::Param || node->inputCategory(i) == InputCategory::OptionalParam) {
+            clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
+            clonedInputs[i] -> setName(node -> getParent(i) -> name() + "_0");
+            tiledOperator.insert(clonedInputs[i]);
+        }
     }
 
     const std::vector<std::string> sliceInputsNames = Slice_Op::getInputsName();
@@ -92,6 +94,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
 
         auto slice = Slice();
         auto backend = outTensor->getImpl()->backend();
+
         // Create Slice's Starts producer node
         std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
         for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
@@ -139,6 +142,8 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         auto stepsNode = Producer(steps, slice->name() + sliceInputsNames[4]);
         stepsNode -> addChild(slice, 0, 4);
 
+        // auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, inputDimsSteps);
+        // auto backend = outTensor->getImpl()->backend();
         slice -> addChild(newNode, 0, 0);
         newNode -> addChild(concat, 0, i);
 
diff --git a/src/recipes/LabelGraph.cpp b/src/recipes/LabelGraph.cpp
index ac0e6bfe197460c8c422a6c1f3b3240518ee1f29..75bcd36bf61f7c23645038bedb060cd13bdce2c5 100644
--- a/src/recipes/LabelGraph.cpp
+++ b/src/recipes/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvAttr::KernelDims>(), op->template getAttr<ConvAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvDepthWiseAttr::KernelDims>(), op->template getAttr<ConvDepthWiseAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<AvgPoolingAttr::KernelDims>(), op->template getAttr<AvgPoolingAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
diff --git a/src/recipes/RemoveNode.cpp b/src/recipes/RemoveNode.cpp
index 317db6f87b2d3c4a6879a2f176afeaf06b36f733..a09c67991409dfe491d46b4ad739f9ddf5b72aef 100644
--- a/src/recipes/RemoveNode.cpp
+++ b/src/recipes/RemoveNode.cpp
@@ -31,7 +31,7 @@ size_t Aidge::removeNode(std::shared_ptr<GraphView> graphView, const std::string
         std::set<NodePtr> nodesToRemove = solution->at(type);
         if (incProducers) {
             for (const auto& nodePtr: (*solution->at(type).begin())->getParents()) {
-                if (nodePtr->type() == "Producer") {
+                if (nodePtr != nullptr && nodePtr->type() == "Producer") {
                     nodesToRemove.insert(nodePtr);
                 }
             }
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 0d59e6a4220385df90cf36d4e978087ac570876c..d63c93deb1ba2d7974ffc6e5b8ccd1e9c57dc76c 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -197,18 +197,20 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
             bool isStillConsumer = false;
             // Only look for data inputs. If no data is available on data input,
             // by definition, no parameter can be consumed on parameter inputs.
-            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbData(); ++inputIdx) {
-                AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
-
-                if (consumer->getOperator()->getNbConsumedData(inputIdx) <
-                            getNbAvailableData(consumer, inputIdx)) {
-                    Log::debug("  still consumer: C{} < P{} for input #{}",
-                        consumer->getOperator()->getNbConsumedData(inputIdx),
-                        getNbAvailableData(consumer, inputIdx), inputIdx);
-
-                    // there is still data to consume
-                    isStillConsumer = true;
-                    break;
+            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
+                if (consumer->inputCategory(inputIdx) == InputCategory::Data) {
+                    AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
+
+                    if (consumer->getOperator()->getNbConsumedData(inputIdx) <
+                                getNbAvailableData(consumer, inputIdx)) {
+                        Log::debug("  still consumer: C{} < P{} for input #{}",
+                            consumer->getOperator()->getNbConsumedData(inputIdx),
+                            getNbAvailableData(consumer, inputIdx), inputIdx);
+
+                        // there is still data to consume
+                        isStillConsumer = true;
+                        break;
+                    }
                 }
             }
 
@@ -651,19 +653,16 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>&
         }
     }
 
-    // Otherwise, two cases:
+    // Otherwise, it means that the input is not connected. Two cases:
+    // - No tensor exists: the input is assumed to be optional
+    // - A valid tensor exists:
     if (node->getOperator()->getRawInput(inputIdx)) {
-        // Input is not connected but a valid tensor exists
         // => This means data was fed manually to the input, without a Producer
         // In this case, we assume a single-use data (unlike a Producer, which
         // keep producing the data each time it is needed).
         fmt::print("No producer node attached to input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
         return Elts_t::DataElts(std::static_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputIdx))->size());
     }
-    else {
-        // Input is not connected, this is an error
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
-    }
 
     return Elts_t::NoneElts();
 }
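
With nbData() removed, the scheduler now walks every input and only considers those whose category is Data when deciding whether a consumer still has data to consume. The filtering pattern boils down to the following sketch (stand-in types, not the Scheduler API):

    #include <cstddef>
    #include <vector>

    enum class InputCategory { Data, OptionalData, Param, OptionalParam };

    // Sketch of the loop above: only data inputs are compared; parameter
    // inputs are ignored when checking for remaining data to consume.
    bool stillConsumer(const std::vector<InputCategory>& categories,
                       const std::vector<std::size_t>& consumed,
                       const std::vector<std::size_t>& available) {
        for (std::size_t i = 0; i < categories.size(); ++i) {
            if (categories[i] != InputCategory::Data) continue;
            if (consumed[i] < available[i]) return true;  // data left on input #i
        }
        return false;
    }
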
diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp
index 74b1b3f0c6e9be164792460669821744661c15b3..88b5e98bc62456bd59dc235c3112396daaeddd24 100644
--- a/src/scheduler/SequentialScheduler.cpp
+++ b/src/scheduler/SequentialScheduler.cpp
@@ -73,10 +73,7 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std
     }
 }
 
-void Aidge::SequentialScheduler::backward(bool instanciateGrad) {
-    // create ad set Grad values
-    if (instanciateGrad) { compile_gradient(mGraphView); }
-
+void Aidge::SequentialScheduler::backward() {
     // TODO: Check output grad are not empty
 
     // Generate scheduling *only if empty*
diff --git a/src/utils/Attributes.cpp b/src/utils/Attributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e79db53a60a955e3502e070cda5818d3d7b6c922
--- /dev/null
+++ b/src/utils/Attributes.cpp
@@ -0,0 +1,96 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/utils/Attributes.hpp"
+
+#include <cctype>  // std::isdigit, std::islower, std::isupper, std::tolower,
+                   // std::toupper
+#include <string>
+
+std::string Aidge::Attributes::snakeToPascal(const std::string& snakeCase) {
+    std::string result;
+    bool to_upper = true; // Start with uppercase for PascalCase
+
+    for (char ch : snakeCase) {
+        if (ch == '_') {
+            to_upper = true; // Next character should be uppercase
+        } else {
+            if (to_upper) {
+                result += std::toupper(ch);
+                to_upper = false; // Reset flag after making a character uppercase
+            } else {
+                result += ch;
+            }
+        }
+    }
+    return result;
+}
+
+std::string Aidge::Attributes::pascalToSnake(const std::string& pascalCase) {
+    std::string result;
+
+    for (char ch : pascalCase) {
+        if (std::isupper(ch)) {
+            if (!result.empty()) {
+                result += '_';
+            }
+            result += std::tolower(ch);
+        } else {
+            result += ch;
+        }
+    }
+    return result;
+}
+
+bool Aidge::Attributes::isPascalCase(const std::string& str) {
+    if (str.empty() || !std::isupper(str[0])) {
+        return false;
+    }
+
+    bool expectUpper = false;
+    for (size_t i = 1; i < str.size(); ++i) {
+        if (str[i] == '_') {
+            return false;
+        }
+        if (std::isupper(str[i])) {
+            if (!expectUpper) {
+                return false;
+            }
+            expectUpper = false;
+        } else if (std::islower(str[i]) || std::isdigit(str[i])) {
+            expectUpper = true;
+        } else {
+            return false;
+        }
+    }
+    return true;
+}
+
+bool Aidge::Attributes::isSnakeCase(const std::string& str) {
+    if (str.empty()) {
+        return false;
+    }
+
+    bool lastCharWasUnderscore = false;
+    for (char ch : str) {
+        if (ch == '_') {
+            if (lastCharWasUnderscore) {
+                return false;
+            }
+            lastCharWasUnderscore = true;
+        } else if (!std::islower(ch) && !std::isdigit(ch)) {
+            return false;
+        } else {
+            lastCharWasUnderscore = false;
+        }
+    }
+    return true;
+}
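
These naming helpers back the attribute renaming visible in the tests below (e.g. intAttr becoming IntAttr). Assuming they are exposed as static members of Aidge::Attributes, a quick sanity check of the round trip could look like:

    #include <cassert>
    #include <string>

    #include "aidge/utils/Attributes.hpp"

    int main() {
        // Assumed callable as static members; adapt if they are instance methods.
        assert(Aidge::Attributes::snakeToPascal("no_bias") == "NoBias");
        assert(Aidge::Attributes::pascalToSnake("KernelDims") == "kernel_dims");
        assert(Aidge::Attributes::isPascalCase("StrideDims"));
        assert(Aidge::Attributes::isSnakeCase("stride_dims"));
        return 0;
    }
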
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 655fd725e9d7d913d24c6552571ae3b91e3605b4..62e90dcbd7c20548019afae1a04f84b3e1d4484a 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -40,7 +40,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T_default.dims() == std::vector<DimSize_t>({})) &&
             (T_default.strides() == std::vector<DimSize_t>({1})) &&
             (T_default.getImpl() == nullptr) &&
-            (T_default.grad() == nullptr) &&
+            (T_default.grad() != nullptr) &&
             (T_default.isContiguous() == true)
         ));
     }
@@ -53,7 +53,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({})) &&
             (T.strides() == std::vector<DimSize_t>({1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -67,7 +67,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == Tdims) &&
             (T.strides() == std::vector<DimSize_t>({5040,2520,840,210,42,7,1})) &&
             (T.getImpl() == nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -83,7 +83,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2})) &&
             (T.strides() == std::vector<DimSize_t>({1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
 
@@ -97,7 +97,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2,2,2})) &&
             (T.strides() == std::vector<DimSize_t>({4,2,1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
         REQUIRE_NOTHROW(T = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
@@ -113,7 +113,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2,2,2,2})) &&
             (T.strides() == std::vector<DimSize_t>({8,4,2,1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -157,7 +157,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
                 (T.dims() == Tclone.dims()) &&
                 (T.strides() == Tclone.strides()) &&
                 (T.getImpl() != Tclone.getImpl()) &&
-                (Tclone.grad() == nullptr) &&
+                (Tclone.grad() != nullptr) &&
                 (Tclone.isContiguous() == true)
             ));
             REQUIRE(Tclone == T);
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 8403686d16da15e7e8ad4616029a241d6197d450..8e9f5a27e275a5ce56ddf57fa092ec96cec84711 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -399,9 +399,7 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
         conv1->resetConnections(false);
 
         REQUIRE(conv->output(0).size() == 0);
-        for (std::size_t i = 0; i < conv1->nbData(); ++i) {
-        REQUIRE((conv1->input(i) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
-        }
+        REQUIRE((conv1->input(0) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
         REQUIRE((conv1->input(1) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod1, 0)));
         REQUIRE((conv1->input(2) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod2, 0)));
         REQUIRE((conv2->input(0) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
@@ -554,6 +552,69 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myConv, other2}));
     }
 
+    SECTION("replace same input category 1") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Data}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew = GenericOperator("myNew", {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == other1);
+        REQUIRE(myNew->input(1).first == nullptr);
+        REQUIRE(myNew->input(2).first == nullptr);
+    }
+
+    SECTION("replace same input category 2") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Param}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld, 0, 0);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew = GenericOperator("myNew", {InputCategory::Data, InputCategory::Param, InputCategory::Data}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == nullptr);
+        REQUIRE(myNew->input(1).first == other1);
+        REQUIRE(myNew->input(2).first == nullptr);
+    }
+
+    SECTION("replace same input category 3") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Data}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew = GenericOperator("myNew", {InputCategory::Data, InputCategory::Data, InputCategory::Data}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == other1);
+        REQUIRE(myNew->input(1).first == other1);
+        REQUIRE(myNew->input(2).first == other1);
+    }
+
     SECTION("Change every Nodes in a GraphView") {
         auto matmulWeight0 = GenericOperator("Producer", 0, 0, 1, "matmul_w0");
         auto addBias0 = GenericOperator("Producer", 0, 0, 1, "add_b0");
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index 903eefc0c7e7a34170d60dc136e792b8687e96e3..6abb4d37114d0952feb13c6cfbee66bd65dc5748 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -323,7 +323,7 @@ TEST_CASE("[core/graph] Matching") {
         gm.addNodeLambda("3x3", [](const NodePtr& node) {
             const std::shared_ptr<Conv_Op<2>> op =
                 std::static_pointer_cast<Conv_Op<2>>(node->getOperator());
-            return (op->getAttr<std::array<DimSize_t, 2>>("KernelDims") == std::array<DimSize_t, 2>({3, 3}));
+            return (op->kernelDims() == std::array<DimSize_t, 2>({3, 3}));
         });
 
         const auto results = gm.match("Pad->Conv[3x3]->ReLU");
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 8d634cc3a105c423b54b6003f41204aeb1fc5335..41bad69749fd82f892c6faa625739d0493396c73 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -20,7 +20,7 @@ using namespace Aidge;
 TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        const char* key = "intAttr";
+        const char* key = "IntAttr";
         Testop.addAttr(key, int(5));
         int registeredVal = Testop.getAttr<int>(key);
         REQUIRE(registeredVal == 5);
@@ -28,21 +28,21 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("LONG") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         long value = 3;
-        const char* key = "longAttr";
+        const char* key = "LongAttr";
         Testop.addAttr(key, value);
         REQUIRE(Testop.getAttr<long>(key) == value);
     }
     SECTION("FLOAT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         float value = 2.0;
-        const char* key = "floatAttr";
+        const char* key = "FloatAttr";
         Testop.addAttr(key, value);
         REQUIRE(Testop.getAttr<float>(key) == value);
     }
      SECTION("VECTOR<BOOL>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<bool> value = {true, false, false, true, true};
-        const char* key = "vect";
+        const char* key = "Vect";
         Testop.addAttr(key, value);
 
         REQUIRE(Testop.getAttr<std::vector<bool>>(key).size() == value.size());
@@ -53,7 +53,7 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("VECTOR<INT>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<int> value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
-        const char* key = "vect";
+        const char* key = "Vect";
         Testop.addAttr(key, value);
 
         REQUIRE(Testop.getAttr<std::vector<int>>(key).size() == value.size());
@@ -66,23 +66,23 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
         Goal : Test that the offsets are well done by adding different attributes with different size.
         */
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addAttr<long>("longAttr", 3);
-        Testop.addAttr<float>("floatAttr", 2.0);
-        Testop.addAttr<uint8_t>("uint8Attr", 5);
-        Testop.addAttr<long long>("llAttr", 10);
-        REQUIRE(Testop.getAttr<long>("longAttr") == 3);
-        REQUIRE(Testop.getAttr<float>("floatAttr") == 2.0);
-        REQUIRE(Testop.getAttr<uint8_t>("uint8Attr") == 5);
-        REQUIRE(Testop.getAttr<long long>("llAttr") == 10);
+        Testop.addAttr<long>("LongAttr", 3);
+        Testop.addAttr<float>("FloatAttr", 2.0);
+        Testop.addAttr<uint8_t>("Uint8Attr", 5);
+        Testop.addAttr<long long>("LlAttr", 10);
+        REQUIRE(Testop.getAttr<long>("LongAttr") == 3);
+        REQUIRE(Testop.getAttr<float>("FloatAttr") == 2.0);
+        REQUIRE(Testop.getAttr<uint8_t>("Uint8Attr") == 5);
+        REQUIRE(Testop.getAttr<long long>("LlAttr") == 10);
     }
 }
 
 TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
     SECTION("WRONG TYPE FOR GETTER") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addAttr<long>("longAttr", 3);
+        Testop.addAttr<long>("LongAttr", 3);
 
         // This line should raise a failed assert
-        REQUIRE_THROWS(Testop.getAttr<int>("longAttribute"));
+        REQUIRE_THROWS(Testop.getAttr<int>("LongAttribute"));
     }
 }
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index ed4afafe39a367ecabb25ff949eb3d03999d1ea9..d1b4e2e31e8c57e2c3eebd42019ba9f42c4d39e0 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -39,7 +39,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(microGraph->outputNodes().size() == 1);
         REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
         REQUIRE(op->nbInputs() == 3);
-        REQUIRE(op->nbData() == 1);
+        REQUIRE(op->inputCategory(0) == InputCategory::Data);
+        REQUIRE(op->inputCategory(1) == InputCategory::Param);
+        REQUIRE(op->inputCategory(2) == InputCategory::OptionalParam);
         REQUIRE(op->nbOutputs() == 1);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(std::vector<std::size_t>({2,1,5,5}));
@@ -66,7 +68,13 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         microGraph->save("lstm", false, false);
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
@@ -94,7 +102,13 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
@@ -129,6 +143,6 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(g->rootNode() == pop);
         g->save("lstm_expanded", true, true);
 
-        REQUIRE(g->getNodes().size() == 41);
+        REQUIRE(g->getNodes().size() == 33);
     }
 }
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index 84099ac0b77a633893af6a7550464e539c95d806..24f5aa2e231b5204add1c8f87cdeb7a71175ea05 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -27,8 +27,8 @@ namespace Aidge {
 TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
   std::shared_ptr<Node> flatten =
       GenericOperator("Flatten", 1, 0, 1, "myFlatten");
-  std::shared_ptr<Node> fc0 = FC(10, 10, "FC_1");
-  std::shared_ptr<Node> fc1 = FC(10, 10, "FC_2");
+  std::shared_ptr<Node> fc0 = FC(10, 10, false, "FC_1");
+  std::shared_ptr<Node> fc1 = FC(10, 10, false, "FC_2");
   std::shared_ptr<Node> prod = Producer(std::array<DimSize_t, 10>(), "myProd");
 
   SECTION("flatten last layer : nothing removed because pattern searched is "
diff --git a/version.txt b/version.txt
index 0c62199f16ac1e2d7f7ae75b420c1231325dff4e..ee1372d33a29e27945406f0527f8af8e6ee119c9 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.2.1
+0.2.2