diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index 652f485a9d3de6869b55613549172d49913e8509..8544c5647befe4d5aa4aa362d016787131c36692 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -7,8 +7,8 @@ http://www.eclipse.org/legal/epl-2.0.
 
 SPDX-License-Identifier: EPL-2.0
 """
-from .aidge_core import * # import so generated by PyBind
-from .export_utils import ExportNode, generate_file, generate_str
-from .aidge_export_aidge import *
-from . import utils
+from aidge_core.aidge_core import * # import so generated by PyBind
+import aidge_core.export_utils
+import aidge_core.utils
+from aidge_core.aidge_export_aidge import serialize_to_cpp
 from ._version import *
diff --git a/aidge_core/aidge_export_aidge/__init__.py b/aidge_core/aidge_export_aidge/__init__.py
index c5d6f96b3300c0e86147baac30d7cae8a3a0b798..9f042cdbcfff071dabc9a31817bf8e2e95c36ad9 100644
--- a/aidge_core/aidge_export_aidge/__init__.py
+++ b/aidge_core/aidge_export_aidge/__init__.py
@@ -5,4 +5,4 @@ FILE = Path(__file__).resolve()
 ROOT_EXPORT = FILE.parents[0]
 
 from .operator_export import *
-from .export import export
+from .export import serialize_to_cpp
diff --git a/aidge_core/aidge_export_aidge/export.py b/aidge_core/aidge_export_aidge/export.py
index ac8c2e0b86cc4f7b1009f9a701d6dcb91d248009..f5d8c8c7ca6f3fa7c7ef19b1aef3987c20acd1f7 100644
--- a/aidge_core/aidge_export_aidge/export.py
+++ b/aidge_core/aidge_export_aidge/export.py
@@ -2,15 +2,14 @@ import aidge_core
 import shutil
 import os
 from pathlib import Path
-from .utils import supported_operators, OPERATORS_REGISTRY
-from . import ROOT_EXPORT
-
-
-from aidge_core import ExportNode, generate_file
 
+import aidge_core.export_utils
+from . import ROOT_EXPORT
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 
+from aidge_core.export_utils import generate_file
 
-def export(export_folder: str,
+def serialize_to_cpp(export_folder: str,
            graph_view: aidge_core.GraphView,
            enable_python_binding: bool = True,
            ):
@@ -59,7 +58,6 @@ def export(export_folder: str,
     open_nodes = list(graph_view.get_input_nodes())
     # List of Aidge nodes already explored
     closed_nodes = []
-
     while open_nodes:
         node = open_nodes.pop(0)
         if node in closed_nodes:
@@ -80,24 +78,34 @@ def export(export_folder: str,
             continue
         # Next nodes to treat are children of current node
         open_nodes += list(node.get_children())
-
-        if node.type() in supported_operators():
-            list_operators.append(node.type())
-            op = OPERATORS_REGISTRY[node.type()](node)
-
-            # TODO: list_configs and list_actions don't need to be passed by argument
-            # Export the configuration
-            list_configs = op.export(export_folder_path, list_configs)
-
-            # Add forward kernel
-            list_actions = op.forward(list_actions)
-        else:
-            raise RuntimeError(f"Operator: {node.type()} is not supported")
+        node.get_operator().set_backend(ExportSerialize._name)
+        op_impl = node.get_operator().get_impl()
+        if op_impl is None:
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
+        if not isinstance(op_impl, ExportSerialize):
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
+        required_specs = op_impl.get_required_spec()
+        specs = op_impl.get_best_match(required_specs)
+        export_node = op_impl.get_export_node(specs)
+        if export_node is None:
+            raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
+        op = export_node(node, None)
+
+
+        # TODO: list_configs and list_actions don't need to be passed by argument
+        # Export the configuration
+        list_configs += op.export(export_folder_path)
+
+        # Add forward kernel
+        list_actions += op.forward()
         closed_nodes.append(node)
     list_operators = list(dict.fromkeys(list_operators)) # make unique
 
     # Generate full dnn.cpp
-    aidge_core.generate_file(
+    aidge_core.export_utils.generate_file(
         export_folder_path / "src/dnn.cpp",
         ROOT_EXPORT / "templates/dnn.jinja",
         headers=list_configs,
diff --git a/aidge_core/aidge_export_aidge/operator_export/add.py b/aidge_core/aidge_export_aidge/operator_export/add.py
new file mode 100644
index 0000000000000000000000000000000000000000..4eb7c3e37f0d63388a5bfe8600f184b9da2ffc49
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/add.py
@@ -0,0 +1,14 @@
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+@ExportSerialize.register("Add", ImplSpec(IOSpec(dtype.any)))
+class Add(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = ""
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/add.jinja")
+        self.include_list = ["aidge/operator/Add.hpp"]
+        self.kernels_to_copy = []
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv.py b/aidge_core/aidge_export_aidge/operator_export/conv.py
index fb7092fb18982a3cc3f11a1ca47394ce2f77d0b6..ea23c1551787c8579549d54a1fe7396995eb1bff 100644
--- a/aidge_core/aidge_export_aidge/operator_export/conv.py
+++ b/aidge_core/aidge_export_aidge/operator_export/conv.py
@@ -1,31 +1,17 @@
-from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import ExportNode, generate_file, generate_str
-from pathlib import Path
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("Conv")
-class Conv(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-    def export(self, export_folder:Path, list_configs:list):
-        include_path = f"attributes/{self.name}.hpp"
-        filepath = export_folder / f"include/{include_path}"
-
-        generate_file(
-            filepath,
-            ROOT_EXPORT / "templates/attributes/conv.jinja",
-            name=self.name,
-            **self.attributes
-        )
-        list_configs.append(include_path)
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT /"templates/graph_ctor/conv.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+@ExportSerialize.register(["Conv1D", "Conv2D"], ImplSpec(IOSpec(dtype.any)))
+class Conv(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/conv.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT /"templates/graph_ctor/conv.jinja")
+        self.include_list = ["aidge/operator/Conv.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py b/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e04f8aac17da5662a3ed08bc627969dbb3a9c13
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py
@@ -0,0 +1,17 @@
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+@ExportSerialize.register(["ConvDepthWise1D", "ConvDepthWise2D"], ImplSpec(IOSpec(dtype.any)))
+class ConvDepthWise(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/conv_depth_wise.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT /"templates/graph_ctor/conv_depth_wise.jinja")
+        self.include_list = ["aidge/operator/ConvDepthWise.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/fc.py b/aidge_core/aidge_export_aidge/operator_export/fc.py
index fcd528528707dc6eec917790b46e509c2984fa66..4f964a9942600d46740b570975a218b4c2e7aabd 100644
--- a/aidge_core/aidge_export_aidge/operator_export/fc.py
+++ b/aidge_core/aidge_export_aidge/operator_export/fc.py
@@ -1,37 +1,18 @@
-from aidge_core.aidge_export_aidge.utils import operator_register,parse_node_input
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import ExportNode, generate_file, generate_str
-from pathlib import Path
-
-@operator_register("FC")
-class FC(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-
-        include_path = f"attributes/{self.name}.hpp"
-        filepath = export_folder / f"include/{include_path}"
-
-
-        generate_file(
-            filepath,
-            ROOT_EXPORT / "templates/attributes/fc.jinja",
-            name=self.name,
-            InChannels=self.inputs_dims[1][1],
-            OutChannels=self.operator.out_channels(),
-            **self.attributes
-        )
-        list_configs.append(include_path)
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/fc.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+
+@ExportSerialize.register("FC", ImplSpec(IOSpec(dtype.any)))
+class FC(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/fc.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/fc.jinja")
+        self.include_list = ["aidge/operator/FC.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
index 0c63e71b423b90f62536cafd25c61101e76e0562..6d9c7998fb90153bfdfd2898c1dfcfb1ad730f20 100644
--- a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
+++ b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
@@ -1,32 +1,17 @@
-from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import ExportNode, generate_file, generate_str
-from pathlib import Path
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("MaxPooling")
-class MaxPooling(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-
-    def export(self, export_folder:Path, list_configs:list):
-        include_path = f"attributes/{self.name}.hpp"
-        filepath = export_folder / f"include/{include_path}"
-
-        generate_file(
-            filepath,
-            ROOT_EXPORT / "templates/attributes/maxpooling.jinja",
-            name=self.name,
-            **self.attributes
-        )
-        list_configs.append(include_path)
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+@ExportSerialize.register(["MaxPooling1D", "MaxPooling2D", "MaxPooling3D"], ImplSpec(IOSpec(dtype.any)))
+class MaxPooling(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/maxpooling.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja")
+        self.include_list = ["aidge/operator/MaxPooling.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/pad.py b/aidge_core/aidge_export_aidge/operator_export/pad.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d6869de07b985169399697e95b6b719f658c911
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/pad.py
@@ -0,0 +1,17 @@
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+@ExportSerialize.register(["Pad1D", "Pad2D"], ImplSpec(IOSpec(dtype.any)))
+class Pad(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/pad.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT /"templates/graph_ctor/pad.jinja")
+        self.include_list = ["aidge/operator/Pad.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py
index d082e9726b7ca33fbe6f4692bf7b55930b69cb9d..02f2f1f39c6797d7f92a5938d6dbe8853079a624 100644
--- a/aidge_core/aidge_export_aidge/operator_export/producer.py
+++ b/aidge_core/aidge_export_aidge/operator_export/producer.py
@@ -1,65 +1,30 @@
-from aidge_core.aidge_export_aidge.utils import operator_register
-from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import dtype, ExportNode, generate_file, generate_str
 import numpy as np
-from pathlib import Path
-
-# Convert aidge datatype to C++ type
-datatype_converter = {
-    dtype.float64 : "double",
-    dtype.float32 : "float",
-    dtype.float16 : "half_float::half",
-    dtype.int8    : "int8_t",
-    dtype.int16   : "int16_t",
-    dtype.int32   : "int32_t",
-    dtype.int64   : "int64_t",
-    dtype.uint8   : "uint8_t",
-    dtype.uint16  : "uint16_t",
-    dtype.uint32  : "uint32_t",
-    dtype.uint64  : "uint64_t"
-}
 
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("Producer")
-class Producer(ExportNode):
+@ExportSerialize.register("Producer", ImplSpec(IOSpec(dtype.any)))
+class Producer(ExportNodeCpp):
     """
     If there is a standardization of the export operators
     then this class should be just a inheritance of ProducerCPP
     """
-    def __init__(self, node):
-        super().__init__(node)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         child, in_idx = self.node.output(0)[0]
-        self.tensor_name = f"{child.name()}_{in_idx}"
-        self.values = np.array(self.operator.get_output(0))
-
-    def export(self, export_folder:Path, list_configs:list):
-        assert(len(self.node.output(0)) == 1)
 
-        include_path = f"parameters/{self.tensor_name}.hpp"
-        filepath = export_folder / f"include/{include_path}"
+        self.values = np.array(self.operator.get_output(0))
 
-        aidge_tensor = self.operator.get_output(0)
-        aidge_type = aidge_tensor.dtype()
-        if aidge_type in datatype_converter:
-            datatype = datatype_converter[aidge_type]
-        else:
-            raise RuntimeError(f"No conversion found for data type {aidge_type}.")
-        generate_file(
-            filepath,
-            ROOT_EXPORT / "templates/parameter.jinja",
-            dims = aidge_tensor.dims(),
-            data_t = datatype, # TODO : get data from producer
-            name = self.tensor_name,
-            values = str(aidge_tensor)
-        )
-        list_configs.append(include_path)
-        return list_configs
+        self.config_template = str(
+            ROOT_EXPORT / "templates/parameter.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/producer.jinja")
+        self.attributes["tensor_name"] = f"{child.name()}_{in_idx}"
+        self.attributes["values"] = str(self.operator.get_output(0))
+        self.include_list = ["aidge/operator/Producer.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
 
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/producer.jinja",
-            name=self.name,
-            tensor_name=self.tensor_name,
-            **self.attributes
-        ))
-        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/relu.py b/aidge_core/aidge_export_aidge/operator_export/relu.py
index c0f4f6afdc35737a8967f51c1859bda0c9773f88..b8398e30504b534fba755e6c613d361d873e09cd 100644
--- a/aidge_core/aidge_export_aidge/operator_export/relu.py
+++ b/aidge_core/aidge_export_aidge/operator_export/relu.py
@@ -1,21 +1,14 @@
-from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
-from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from pathlib import Path
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("ReLU")
-class ReLU(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-    def export(self, export_folder:Path, list_configs:list):
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/relu.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+@ExportSerialize.register("ReLU", ImplSpec(IOSpec(dtype.any)))
+class ReLU(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = ""
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/relu.jinja")
+        self.include_list = ["aidge/operator/ReLU.hpp"]
+        self.kernels_to_copy = []
diff --git a/aidge_core/aidge_export_aidge/operator_export/sub.py b/aidge_core/aidge_export_aidge/operator_export/sub.py
index efcdd0924fbcf6944b0fb95a967e1a3e16ccc3c5..01b68b70f4cfcf3b3899202269106c58cb7e54a1 100644
--- a/aidge_core/aidge_export_aidge/operator_export/sub.py
+++ b/aidge_core/aidge_export_aidge/operator_export/sub.py
@@ -1,21 +1,14 @@
-from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
-from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from pathlib import Path
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("Sub")
-class Sub(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-    def export(self, export_folder:Path, list_configs:list):
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/sub.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+@ExportSerialize.register("Sub", ImplSpec(IOSpec(dtype.any)))
+class Sub(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = ""
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/sub.jinja")
+        self.include_list = ["aidge/operator/Sub.hpp"]
+        self.kernels_to_copy = []
diff --git a/aidge_core/aidge_export_aidge/registry.py b/aidge_core/aidge_export_aidge/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe94a22399438b9a07673d21220ff1d0ba4a1dda
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/registry.py
@@ -0,0 +1,10 @@
+from aidge_core.export_utils import ExportLib
+from . import ROOT_EXPORT
+import aidge_core
+
+
+class ExportSerialize(ExportLib):
+    _name="export_serialize"
+
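+# Tensors on the "export_serialize" backend reuse the existing "cpu" float32
+# Tensor implementation: the call below simply aliases the registry entry.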
+aidge_core.register_Tensor(["export_serialize", aidge_core.dtype.float32],
+                           aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float32]))
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
index 48d07e8db8d5fb116148e9d41100fffa01fcf622..58c52abec545f8e62e21dfb35c9f4a5d652f681e 100644
--- a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
@@ -1,17 +1,17 @@
 #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
 #define EXPORT_ATTRIBUTES_{{name|upper}}_H
 
-#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
-#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+#define _{{name|upper}}_IN_CHANNELS  {{in_chan[0]}}
+#define _{{name|upper}}_OUT_CHANNELS {{out_chan[0]}}
 
-{% for i in range(KernelDims|length) %}
-#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{% for i in range(kernel_dims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
 {%- endfor %}
-{% for i in range(StrideDims|length) %}
-#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{% for i in range(stride_dims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
 {%- endfor %}
-{% for i in range(DilationDims|length) %}
-#define _{{name|upper}}_DILATION_{{i}} {{DilationDims[i]}}
+{% for i in range(dilation_dims|length) %}
+#define _{{name|upper}}_DILATION_{{i}} {{dilation_dims[i]}}
 {%- endfor %}
 
 #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..7c2ffff448bb3d028f378ba6fc124abdad6e9ad7
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja
@@ -0,0 +1,16 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_CHANNELS {{out_chan[0]}}
+
+{% for i in range(kernel_dims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
+{%- endfor %}
+{% for i in range(stride_dims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
+{%- endfor %}
+{% for i in range(dilation_dims|length) %}
+#define _{{name|upper}}_DILATION_{{i}} {{dilation_dims[i]}}
+{%- endfor %}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
index e292f9b611978877c47b15e91f926f30d27a1cc5..32f4d00515b1ced28b5e49889c09759f0f0dd0db 100644
--- a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
+++ b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
@@ -1,7 +1,7 @@
 #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
 #define EXPORT_ATTRIBUTES_{{name|upper}}_H
 
-#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
-#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+#define _{{name|upper}}_IN_CHANNELS  {{in_chan[0]}}
+#define _{{name|upper}}_OUT_CHANNELS {{out_chan[0]}}
 
 #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
index d258f580e6ff9c523a87b834fdccf2f3b14fb133..96de14b01167a2f4267343594ded8df6f4b5576d 100644
--- a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
+++ b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
@@ -1,13 +1,13 @@
 #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
 #define EXPORT_ATTRIBUTES_{{name|upper}}_H
 
-{% for i in range(KernelDims|length) %}
-#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{% for i in range(kernel_dims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
 {%- endfor %}
-{% for i in range(StrideDims|length) %}
-#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{% for i in range(stride_dims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
 {%- endfor %}
 
-#define _{{name|upper}}_CEIL_MODE {{CeilMode|int}}
+#define _{{name|upper}}_CEIL_MODE {{ceil_mode|int}}
 
 #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja b/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8fb76a6b220c643da9a3f02d38f1757fed0c1b86
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja
@@ -0,0 +1,12 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+{%- set half_length = (begin_end_borders|length / 2)|int -%}
+{% for i in range(half_length) %}
+#define _{{name|upper}}_BEGIN_BORDERS_{{i}}  {{begin_end_borders[2*i]}}
+#define _{{name|upper}}_END_BORDERS_{{i}}  {{begin_end_borders[2*i+1]}}
+{%- endfor %}
+#define _{{name|upper}}_BORDER_TYPE {{border_type|int}}
+#define _{{name|upper}}_BORDER_VALUE {{border_value}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/dnn.jinja b/aidge_core/aidge_export_aidge/templates/dnn.jinja
index 5da46b2d8a439a359dfb1c7ec8ebc18e8d516767..bb8faff4a800d9676317f5c0301827e21d19df6d 100644
--- a/aidge_core/aidge_export_aidge/templates/dnn.jinja
+++ b/aidge_core/aidge_export_aidge/templates/dnn.jinja
@@ -17,7 +17,7 @@
 
 /*** OPERATOR ATTRIBUTES & PARAMETERS ***/
 {%- for header in headers %}
-#include "{{ header }}"
+#include "{{ header | replace('include/', '') }}"
 {%- endfor %}
 
 /*** HEADER ***/
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
index 8e841ea2a10c71b884736dcbd7cfd03b52c5ad4f..d9f59a94663692b00594f0ecf5f452cc8e2132ca 100644
--- a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
@@ -1,7 +1,7 @@
 {# NOTE: Trying a shorter notation like {%- for input in inputs if input[0] %}
 will mess up loop.index as the input set up at None will not increment ! #}
-{%- for input in inputs %}
-{%- if input[0] %}
-{{input[0]}}->addChild({{name}}, {{input[1]}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #}
+{%- for input_node, out_id in node.inputs() %}
+{%- if input_node %}
+{{input_node.name()}}->addChild({{name}}, {{out_id}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #}
 {%- endif %}
 {%- endfor %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..2bfaf93646fc24f6a44ac170a8c2c932f5daf0fc
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Add(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
index a805f8065e87244bf0546ca42d294b86f144a26d..bd4eed2d39f0872759f568b6cc54b8abe3792db7 100644
--- a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
@@ -5,18 +5,18 @@ std::shared_ptr<Aidge::Node> {{name}} =
             _{{name|upper}}_IN_CHANNELS,
             _{{name|upper}}_OUT_CHANNELS,
             {
-            {%- for i in range(KernelDims|length) -%}
+            {%- for i in range(kernel_dims|length) -%}
                 _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             },
             "{{name}}",
             {
-            {%- for i in range(StrideDims|length) -%}
+            {%- for i in range(stride_dims|length) -%}
                 _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             },
             {
-            {%- for i in range(DilationDims|length) -%}
+            {%- for i in range(dilation_dims|length) -%}
                 _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             }
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..f7e1a85bbc084631ea4d26bfadd106bc2a5a69fe
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja
@@ -0,0 +1,25 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::ConvDepthWise(
+            _{{name|upper}}_CHANNELS,
+            {
+            {%- for i in range(kernel_dims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(stride_dims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            {
+            {%- for i in range(dilation_dims|length) -%}
+                _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            }
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
index c6587c128509712e1a8e903e7484476548e9347d..ceb4784a0942e91b44fe6956833dafda26a2e314 100644
--- a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
@@ -3,13 +3,13 @@
 std::shared_ptr<Aidge::Node> {{name}} =
         Aidge::MaxPooling(
             {
-            {%- for i in range(KernelDims|length) -%}
+            {%- for i in range(kernel_dims|length) -%}
                 _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             },
             "{{name}}",
             {
-            {%- for i in range(StrideDims|length) -%}
+            {%- for i in range(stride_dims|length) -%}
                 _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             },
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..a7bd866207be9f048ae431922710b975268a6155
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja
@@ -0,0 +1,17 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+{%- set half_length = (begin_end_borders|length / 2)|int -%}
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Pad<{{half_length}}>(
+            {
+            {%- for i in range(half_length) -%}
+                _{{name|upper}}_BEGIN_BORDERS_{{i}},  _{{name|upper}}_END_BORDERS_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+             static_cast<Aidge::PadBorderType>(_{{name|upper}}_BORDER_TYPE),
+             _{{name|upper}}_BORDER_VALUE
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/parameter.jinja b/aidge_core/aidge_export_aidge/templates/parameter.jinja
index 11a407cc89f72f24167871a594decc6d90ab489d..0ff9634d98c57ad84dabc30625e660ca404612f6 100644
--- a/aidge_core/aidge_export_aidge/templates/parameter.jinja
+++ b/aidge_core/aidge_export_aidge/templates/parameter.jinja
@@ -4,7 +4,7 @@
 #include <aidge/data/Tensor.hpp>
 #include <memory>
 
-std::shared_ptr<Aidge::Tensor> {{name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{dims|length}}D<{{data_t}}, {{ dims|join(", ") }}> {
+std::shared_ptr<Aidge::Tensor> {{tensor_name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{out_dims[0]|length}}D<{{out_cdtype[0]}}, {{ out_dims[0]|join(", ") }}> {
 {{ values }}
 });
 
diff --git a/aidge_core/aidge_export_aidge/utils/__init__.py b/aidge_core/aidge_export_aidge/utils.py
similarity index 92%
rename from aidge_core/aidge_export_aidge/utils/__init__.py
rename to aidge_core/aidge_export_aidge/utils.py
index ecdf2aec2692a48e108d5f4ad05ed05803319525..31a0d6ee77eafb0883f2882bfaae999307a0fa4b 100644
--- a/aidge_core/aidge_export_aidge/utils/__init__.py
+++ b/aidge_core/aidge_export_aidge/utils.py
@@ -1,5 +1,3 @@
-from .operator_registry import *
-
 def parse_node_input(node_inputs: list) -> list:
     """Parse node intputs in order to adapt the list for Jinja.
 
diff --git a/aidge_core/aidge_export_aidge/utils/operator_registry.py b/aidge_core/aidge_export_aidge/utils/operator_registry.py
deleted file mode 100644
index dd6fbaaceeba9c2125b38354eca9cc116acd29b1..0000000000000000000000000000000000000000
--- a/aidge_core/aidge_export_aidge/utils/operator_registry.py
+++ /dev/null
@@ -1,18 +0,0 @@
-OPERATORS_REGISTRY = {}
-
-def operator_register(*args):
-
-    key_list = [arg for arg in args]
-
-    def decorator(operator):
-        def wrapper(*args, **kwargs):
-            return operator(*args, **kwargs)
-
-        for key in key_list:
-            OPERATORS_REGISTRY[key] = operator
-
-        return wrapper
-    return decorator
-
-def supported_operators():
-    return list(OPERATORS_REGISTRY.keys())
diff --git a/aidge_core/export_utils/__init__.py b/aidge_core/export_utils/__init__.py
index 6fc846d93301f45b0635cd9b2fabae65fa7be8ab..a97e978749d1f5480ef8ef1e7e9c5f00d9c3d7df 100644
--- a/aidge_core/export_utils/__init__.py
+++ b/aidge_core/export_utils/__init__.py
@@ -1,2 +1,6 @@
-from .node_export import *
-from .code_generation import *
+from .node_export import ExportNode, ExportNodeCpp
+from .code_generation import generate_file, generate_str, copy_file
+from .export_registry import ExportLib
+from .scheduler_export import scheduler_export
+from .tensor_export import tensor_to_c, generate_input_file
+
diff --git a/aidge_core/export_utils/code_generation.py b/aidge_core/export_utils/code_generation.py
index a02fc0966702cec7a2cbe33f8411bb71e3035e90..995df18b52d5701af5259d571e6a0a91a83ba665 100644
--- a/aidge_core/export_utils/code_generation.py
+++ b/aidge_core/export_utils/code_generation.py
@@ -1,7 +1,8 @@
 from pathlib import Path
-from jinja2 import Environment, FileSystemLoader
+from jinja2 import Environment, FileSystemLoader, StrictUndefined
 from typing import Union
-
+import os
+import shutil
 
 def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], **kwargs) -> None:
     """Generate a file at `file_path` using the jinja template located at `file_path`.
@@ -21,13 +22,9 @@ def generate_file(file_path: Union[Path, str], template_path: Union[Path, str],
     # Make dir
     file_path.parent.mkdir(parents=True, exist_ok=True)
 
-    # Select template
-    template = Environment(loader=FileSystemLoader(
-        template_path.parent)).get_template(template_path.name)
-
     # Generate file
     with open(file_path, mode="w", encoding="utf-8") as file:
-        file.write(template.render(kwargs))
+        file.write(generate_str(template_path, **kwargs))
 
 
 def generate_str(template_path: Union[Path, str], **kwargs) -> str:
@@ -43,4 +40,12 @@ def generate_str(template_path: Union[Path, str], **kwargs) -> str:
     if isinstance(template_path, str):
         template_path = Path(template_path)
     return Environment(loader=FileSystemLoader(
-        template_path.parent)).get_template(template_path.name).render(kwargs)
+        template_path.parent), undefined=StrictUndefined, keep_trailing_newline=True).get_template(template_path.name).render(kwargs)
+
+def copy_file(filename, dst_folder):
+
+    # If directory doesn't exist, create it
+    if not os.path.exists(dst_folder):
+        os.makedirs(dst_folder)
+
+    shutil.copy(filename, dst_folder)
diff --git a/aidge_core/export_utils/data_conversion.py b/aidge_core/export_utils/data_conversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..401fc39f2a70245a67719699b5f0cdc61108e0cf
--- /dev/null
+++ b/aidge_core/export_utils/data_conversion.py
@@ -0,0 +1,30 @@
+import numpy as np
+import aidge_core
+
+
+datatype_converter_aidge2c = {
+    aidge_core.dtype.float64 : "double",
+    aidge_core.dtype.float32 : "float",
+    aidge_core.dtype.float16 : "half_float::half",
+    aidge_core.dtype.int8    : "int8_t",
+    aidge_core.dtype.int16   : "int16_t",
+    aidge_core.dtype.int32   : "int32_t",
+    aidge_core.dtype.int64   : "int64_t",
+    aidge_core.dtype.uint8   : "uint8_t",
+    aidge_core.dtype.uint16  : "uint16_t",
+    aidge_core.dtype.uint32  : "uint32_t",
+    aidge_core.dtype.uint64  : "uint64_t"
+}
+
+def aidge2c(datatype):
+    """Convert an Aidge datatype to the corresponding C type.
+
+    :param datatype: Aidge datatype to convert
+    :type datatype: :py:object:`aidge_core.dtype`
+    :return: A string representing the C type
+    :rtype: string
+    """
+    if datatype in datatype_converter_aidge2c:
+        return datatype_converter_aidge2c[datatype]
+    else:
+        raise ValueError(f"Unsupported Aidge datatype: {datatype}")
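+
+# Minimal usage sketch (values taken from the mapping above):
+#   aidge2c(aidge_core.dtype.float32)  # -> "float"
+#   aidge2c(aidge_core.dtype.int8)     # -> "int8_t"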
diff --git a/aidge_core/export_utils/export_registry.py b/aidge_core/export_utils/export_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..eabc6eb6b656b0c0b0c8381b665d6a6d8f3805ff
--- /dev/null
+++ b/aidge_core/export_utils/export_registry.py
@@ -0,0 +1,114 @@
+from typing import Dict, List
+import aidge_core
+from aidge_core.export_utils import ExportNode
+
+class classproperty:
+    """Helper class to define class properties,
+    Is equivalent to applying the decorator ``@property`` and ``@classmethod``.
+    But these two decorator are exclusive with python > 12.
+    See discussion https://discuss.python.org/t/add-a-supported-read-only-classproperty-decorator-in-the-stdlib/18090/12
+    """
+
+    def __init__(self, fget):
+        self.fget = fget
+
+    def __get__(self, instance, owner):
+        return self.fget(owner)
+
+
+# TODO: very naive implementation !
+# error handling should be added !
+class ExportLib(aidge_core.OperatorImpl):
+    """Aidge export lib, define a registry
+    """
+    # PUBLIC
+    # Lib name useful ?
+    # Help define namespace
+    _name: str = None
+    # key: Path where static file is
+    # Value: Path where to copy the file relative to the export root
+    static_files: Dict[str, str] = {}
+    # PRIVATE
+    # Registry of exportNode, class level dictionary, shared across all ExportLib
+    _cls_export_node_registry = {}
+
+    def __init__(self, operator):
+        super(ExportLib, self).__init__(operator, self._name)
+
+    @classproperty
+    def _export_node_registry(cls) -> Dict[str, List['ExportNode']]:
+        """Define as a class property to access the registry at class level while keeping it at instance level.
+
+        :return: The export node registry specific to the class
+        :rtype: Dict[str, List[ExportNode]]
+        """
+        return cls._cls_export_node_registry.setdefault(cls, {})
+
+    # Override the virtual OperatorImpl method, in order to provide available
+    # implementation specifications
+    def get_available_impl_specs(self):
+        if self.get_operator().type() in self._export_node_registry:
+            spec_vec = [i for i, _ in self._export_node_registry[self.get_operator().type()]]
+            return spec_vec
+        else:
+            return []
+
+    def get_export_node(self, spec: aidge_core.aidge_core.ImplSpec):
+        for registered_spec, export_node in self._export_node_registry[self.get_operator().type()]:
+            if registered_spec == spec:
+
+                return export_node
+        return None
+
+    # Decorator to register kernels for this export
+    @classmethod
+    def register(cls, op_type, spec):
+        def decorator(operator):
+            class Wrapper(operator):
+                def __init__(self, *args, **kwargs):
+                    super().__init__(*args, **kwargs)
+            type_list = []
+            if isinstance(op_type, list):
+                type_list = op_type
+            elif isinstance(op_type, str):
+                type_list = [op_type]
+            else:
+                raise TypeError("Argument type of register method should be of type 'List[str]' or 'str', got {type(type)}")
+
+            for type_name in type_list:
+                if (type_name not in cls._export_node_registry):
+                    cls._export_node_registry[type_name] = []
+                cls._export_node_registry[type_name].append((spec, operator))
+
+                register_func: str = f"register_{type_name}Op"
+                # If the register function does not exist, the operator type is not declared in aidge_core (e.g. it is a MetaOperator)
+                if register_func not in dir(aidge_core):
+                    raise ValueError(f"Operator of type: {type_name} is not declared as registrable!\nHint: If you try to register a MetaOperator use register_metaop instead.")
+                else:
+                    # Equivalent to aidge_core.register_ConvOp("ExportLibX", ExportLibX)
+                    aidge_core.__getattribute__(register_func)(cls._name, cls)
+            return Wrapper
+        return decorator
+
+    # Decorator to register kernels for this export
+    @classmethod
+    def register_metaop(cls, op_type, spec):
+        def decorator(operator):
+            class Wrapper(operator):
+                def __init__(self, *args, **kwargs):
+                    super().__init__(*args, **kwargs)
+            type_list = []
+            if isinstance(op_type, list):
+                type_list = op_type
+            elif isinstance(op_type, str):
+                type_list = [op_type]
+            else:
+                raise TypeError("Argument 'op_type' of register method should be of type 'List[str]' or 'str', got {type(type)}")
+            for type_name in type_list:
+                if (type_name not in cls._export_node_registry):
+                    cls._export_node_registry[type_name] = []
+                cls._export_node_registry[type_name].append((spec, operator))
+                aidge_core.register_MetaOperatorOp([cls._name, type_name], cls)
+                spec.attrs.add_attr("type", type_name) # MetaOperator specs need to verify the type
+            return Wrapper
+        return decorator
diff --git a/aidge_core/export_utils/node_export.py b/aidge_core/export_utils/node_export.py
index 7aceaa0ccc1f07674241d6f35bbeff90330f2596..d22cc65d1eb247d09a48318d83274acfc3757d3d 100644
--- a/aidge_core/export_utils/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -1,61 +1,320 @@
-from aidge_core import Node, Attributes
+import aidge_core
+from pathlib import Path
 
+from aidge_core.export_utils import data_conversion, code_generation
 from abc import ABC, abstractmethod
+from typing import List
+
+
+def get_chan(tensor: aidge_core.Tensor) -> int:
+    """Given a tensor return the number of channel
+    """
+    dformat = tensor.dformat()
+    dims = tensor.dims()
+    if dformat == aidge_core.dformat.default:
+        if len(dims) == 4:  # Suppose NCHW
+            return dims[1]
+        elif len(dims) == 2:  # Suppose NC
+            return dims[1]
+        else:
+            return None
+    elif dformat == aidge_core.dformat.nchw:
+        return dims[1]
+    elif dformat == aidge_core.dformat.nhwc:
+        return dims[3]
+    elif dformat == aidge_core.dformat.chwn:
+        return dims[0]
+    elif dformat == aidge_core.dformat.ncdhw:
+        return dims[1]
+    elif dformat == aidge_core.dformat.ndhwc:
+        return dims[4]
+    elif dformat == aidge_core.dformat.cdhwn:
+        return dims[0]
+    else:
+        raise RuntimeError(f"Unknown dataformat: {dformat}")
+
+
+def get_height(tensor: aidge_core.Tensor) -> int:
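+    """Given a tensor, return its height, deduced from the data format."""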
+    dformat = tensor.dformat()
+    dims = tensor.dims()
+    if dformat == aidge_core.dformat.default:
+        if len(dims) == 4:  # Suppose NCHW
+            return dims[2]
+        elif len(dims) == 2:  # Suppose NC
+            return 1
+        else:
+            return None
+    elif dformat == aidge_core.dformat.nchw:
+        return dims[2]
+    elif dformat == aidge_core.dformat.nhwc:
+        return dims[1]
+    elif dformat == aidge_core.dformat.chwn:
+        return dims[1]
+    elif dformat == aidge_core.dformat.ncdhw:
+        return dims[3]
+    elif dformat == aidge_core.dformat.ndhwc:
+        return dims[2]
+    elif dformat == aidge_core.dformat.cdhwn:
+        return dims[2]
+    else:
+        raise RuntimeError(f"Unknown dataformat: {dformat}")
+
+
+def get_width(tensor: aidge_core.Tensor) -> int:
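+    """Given a tensor, return its width, deduced from the data format."""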
+    dformat = tensor.dformat()
+    dims = tensor.dims()
+    if dformat == aidge_core.dformat.default:
+        if len(dims) == 4:  # Suppose NCHW
+            return dims[3]
+        elif len(dims) == 2:  # Suppose NC
+            return 1
+        else:
+            return None
+    elif dformat == aidge_core.dformat.nchw:
+        return dims[3]
+    elif dformat == aidge_core.dformat.nhwc:
+        return dims[2]
+    elif dformat == aidge_core.dformat.chwn:
+        return dims[2]
+    elif dformat == aidge_core.dformat.ncdhw:
+        return dims[4]
+    elif dformat == aidge_core.dformat.ndhwc:
+        return dims[3]
+    elif dformat == aidge_core.dformat.cdhwn:
+        return dims[3]
+    else:
+        raise RuntimeError(f"Unknown dataformat: {dformat}")
 
 
 class ExportNode(ABC):
     """Abstract class to interface node with export generation.
+
+    This class exposes a dictionary ``attributes`` containing all the information required to generate an export:
+    - All the attributes of the Aidge node are fetched automatically; the key to get an attribute is its name in Python format, e.g. ``no_bias``
+    - **name**: Name of the Node, ``str``
+    - **node**: Aidge Node, :py:class:`aidge_core.Node`
+    - **nb_in**: Number of inputs, ``int``
+    - **in_name**: Unique name for each input; if there is no input node the name is ``{node_name}_input_{in_id}``, if there is a parent the name is ``{parent_name}_output_{out_id}``, ``list[str]``
+    - **in_dims**: A list of dimensions for each input, ``list[list[int]]``
+    - **in_node**: A list of the Node associated with each input, ``list[aidge_core.Node]``
+    - **in_size**: A list of sizes for each input, ``list[int]``
+    - **in_chan**: A list of channels for each input, deduced from the data format, ``list[int]``
+    - **in_height**: A list of heights for each input, deduced from the data format, ``list[int]``
+    - **in_width**: A list of widths for each input, deduced from the data format, ``list[int]``
+    - **in_dtype**: A list of types (Aidge format) for each input, ``List[:py:class:`aidge_core.dtype`]``
+    - **in_cdtype**: A list of types (C/C++ format) for each input, ``List[str]``
+    - **out_name**: Unique name for each output; the name is ``{name}_output_{out_id}``, ``list[str]``
+    - **out_nodes**: A list of lists of Nodes consuming each output, ``list[list[aidge_core.Node]]``
+    - **nb_out**: Number of outputs, ``int``
+    - **out_dims**: A list of dimensions for each output, ``list[list[int]]``
+    - **out_size**: A list of sizes for each output, ``list[int]``
+    - **out_chan**: A list of channels for each output, deduced from the data format, ``list[int]``
+    - **out_height**: A list of heights for each output, deduced from the data format, ``list[int]``
+    - **out_width**: A list of widths for each output, deduced from the data format, ``list[int]``
+    - **out_dtype**: A list of types (Aidge format) for each output, ``List[:py:class:`aidge_core.dtype`]``
+    - **out_cdtype**: A list of types (C/C++ format) for each output, ``List[str]``
+    - **mem_info**: True if mem_info is available for this node, ``bool``
+    - **mem_info_size**: A list of memory sizes for each output, ``List[int]``
+    - **mem_info_offset**: A list of offsets to access each output, ``List[int]``
+    - **mem_info_stride**: old N2D2
+    - **mem_info_length**: old N2D2
+    - **mem_info_cont_size**: old N2D2
+    - **mem_info_cont_offset**: old N2D2
+    - **mem_info_wrap_offset**: old N2D2
+    - **mem_info_wrap_size**: old N2D2
     """
 
     @abstractmethod
-    def __init__(self, aidge_node: Node) -> None:
-        """Create ExportNode and retieve attirubtes from ``aidge_node``:
-
-        - name: aidge Node name
-        - attributes: dictionnary of attributes of the aidge Operator linked to the node, attributes name follow aidge naming convention
-        - parameters: List of parameters node, order in the list is the same as the one defined by the aidge operator
-
+    def __init__(self, aidge_node: aidge_core.Node, mem_info: List[dict]=None) -> None:
+        """Create ExportNode and retrieve attributes from ``aidge_node``:
         """
+
         super().__init__()
         self.node = aidge_node
         self.operator = aidge_node.get_operator()
-        self.name = self.node.name()
-        self.attributes = self.operator.attr.dict() if self.operator.attr is not None else {} # Attributes are auto fetched from aidge operators
-
-        # rename is_leaf ?
-        self.is_last = len(self.node.get_children()) == 0
-
+        # Attributes are auto fetched from aidge operators
+        self.attributes = {} if isinstance(self.operator, aidge_core.MetaOperatorOp) or self.operator.attr is None else self.operator.attr.dict()
+        self.attributes["node"] = self.node
+        self.attributes["name"] = self.node.name()
+        self.attributes["nb_in"] = self.node.get_nb_inputs()
+        self.attributes["nb_out"] = self.node.get_nb_outputs()
 
+        # List of input nodes
         self.inputs = []
+        # List of output nodes
         self.outputs = []
-        self.inputs_dims = []
-        self.outputs_dims = []
 
-        for idx, parent_node in enumerate(self.node.get_parents()):
-            self.inputs.append(parent_node)
-            if parent_node is not None:
-                self.inputs_dims.append(self.operator.get_input(idx).dims())
-            else:
-                if self.operator.get_input(idx) is not None:
-                    self.inputs_dims.append(self.operator.get_input(idx).dims())
-                else:
-                    self.inputs_dims.append(None)
+        self.attributes["in_name"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_node"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_dims"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_size"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_dformat"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_format"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_dtype"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_cdtype"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_chan"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_height"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_width"] = [None] * self.attributes["nb_in"]
 
-        for idx, child_node in enumerate(self.node.get_children()):
-            self.outputs.append(child_node)
+        self.attributes["out_name"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_nodes"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_dims"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_size"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_dformat"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_format"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_dtype"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_cdtype"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_chan"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_height"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_width"] = [None] * self.attributes["nb_out"]
 
-        # Dirty hot fix, change it quickly
-        self.outputs_dims.append(self.operator.get_output(0).dims())
+        # Producers don't have mem_info
+        # TODO: document this attribute
+        # True if the node has mem_info, else False
+        self.attributes["mem_info"] = mem_info is not None and self.node.type() != "Producer"
+        if self.attributes["mem_info"]:
+            self.attributes["mem_info_size"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_offset"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_stride"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_length"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_cont_size"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_cont_offset"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_wrap_offset"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_wrap_size"] = [None] * self.attributes["nb_out"]
 
-    @abstractmethod
-    def export(self, export_folder:str, list_configs:list):
+        for idx, parent_node_in_id in enumerate(self.node.inputs()):
+            parent_node, out_id = parent_node_in_id
+            self.inputs.append(parent_node)
+            if self.operator.get_input(idx) is not None:
+                tensor = self.operator.get_input(idx)
+                self.attributes["in_name"][idx] = f"{self.attributes['name']}_input_{idx}" if parent_node is None else f"{parent_node.name()}_output_{out_id}"
+                self.attributes["in_node"][idx] = parent_node
+                self.attributes["in_dims"][idx] = tensor.dims()
+                self.attributes["in_size"][idx] = tensor.size()
+                self.attributes["in_dformat"][idx] = tensor.dformat()
+                self.attributes["in_format"][idx] = aidge_core.format_as(tensor.dformat())
+                self.attributes["in_dtype"][idx] = tensor.dtype()
+                self.attributes["in_cdtype"][idx] = data_conversion.aidge2c(
+                    tensor.dtype())
+                self.attributes["in_chan"][idx] = get_chan(tensor)
+                self.attributes["in_height"][idx] = get_height(tensor)
+                self.attributes["in_width"][idx] = get_width(tensor)
+            elif self.operator.input_category(idx) == aidge_core.InputCategory.OptionalParam or \
+                self.operator.input_category(idx) == aidge_core.InputCategory.OptionalData:
+                pass
+            else:
+                raise RuntimeError(f"No input for {self.node.name()} at input {idx}, did you forget to forward dims?")
+        for idx, list_child_node_in_id in enumerate(self.node.outputs()):
+            out_nodes = [node_in_id[0]
+                             for node_in_id in list_child_node_in_id]
+            self.outputs += out_nodes
+            if self.operator.get_output(idx) is not None:
+                tensor = self.operator.get_output(idx)
+                self.attributes["out_name"][idx] = f"{self.attributes['name']}_output_{idx}"
+                self.attributes["out_nodes"][idx] = out_nodes
+                self.attributes["out_dims"][idx] = tensor.dims()
+                self.attributes["out_size"][idx] = tensor.size()
+                self.attributes["out_dformat"][idx] = tensor.dformat()
+                self.attributes["out_format"][idx] = aidge_core.format_as(tensor.dformat())
+                self.attributes["out_dtype"][idx] = tensor.dtype()
+                self.attributes["out_cdtype"][idx] = data_conversion.aidge2c(
+                    tensor.dtype())
+                self.attributes["out_chan"][idx] = get_chan(tensor)
+                self.attributes["out_height"][idx] = get_height(tensor)
+                self.attributes["out_width"][idx] = get_width(tensor)
+                # Output mem_info
+                # TODO: add to docstring
+                if self.attributes["mem_info"]:
+                    if "size" in mem_info[idx]:
+                        self.attributes["mem_info_size"][idx] = mem_info[idx]["size"]
+                    else:
+                        raise RuntimeError("Size is mandatory")
+                    if "offset" in mem_info[idx]:
+                        self.attributes["mem_info_offset"][idx] = mem_info[idx]["offset"]
+                    else:
+                        raise RuntimeError("Offset is mandatory")
+                    if "stride" in mem_info[idx]:
+                        self.attributes["mem_info_stride"][idx] = mem_info[idx]["stride"]
+                    else:
+                        self.attributes["mem_info_stride"][idx] = mem_info[idx]["size"]
+                    if "length" in mem_info[idx]:
+                        self.attributes["mem_info_length"][idx] = mem_info[idx]["length"]
+                    else:
+                        self.attributes["mem_info_length"][idx] = tensor.size()
+                    if "cont_size" in mem_info[idx]:
+                        self.attributes["mem_info_cont_size"][idx] = mem_info[idx]["cont_size"]
+                    else:
+                        self.attributes["mem_info_cont_size"][idx] = mem_info[idx]["size"]
+                    if "cont_offset" in mem_info[idx]:
+                        self.attributes["mem_info_cont_offset"][idx] = mem_info[idx]["cont_offset"]
+                    else:
+                        self.attributes["mem_info_cont_offset"][idx] = mem_info[idx]["offset"]
+                    if "cont_offset" in mem_info[idx]:
+                        self.attributes["mem_info_wrap_offset"][idx] = mem_info[idx]["wrap_offset"]
+                    else:
+                        self.attributes["mem_info_wrap_offset"][idx] = 0
+                    if "wrap_size" in mem_info[idx]:
+                        self.attributes["mem_info_wrap_size"][idx] = mem_info[idx]["wrap_size"]
+                    else:
+                        self.attributes["mem_info_wrap_size"][idx] = 0
+            else:
+                raise RuntimeError(f"No output for {self.node.name()}")
+
+class ExportNodeCpp(ExportNode):
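+    """ExportNode for C/C++ exports: generates a configuration header from ``config_template``,
+    a forward call from ``forward_template``, and copies the kernels listed in ``kernels_to_copy``."""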
+    # Path to the template defining how to export the node definition
+    config_template: str = None
+    # Path to the template defining how to generate the node forward call
+    forward_template: str = None
+    # List of includes to add to the generated file, e.g. "include/toto.hpp"
+    include_list: list = None
+    # List of paths to kernel files to copy into the export
+    # Kernels are copied to str(export_folder / "include" / "kernels")
+    # and are automatically added to the include list.
+    kernels_to_copy: list = None
+    # Path where all the kernels are stored in the export (prefixed by export_root)
+    kernels_path: str = "include/kernels"
+    # Path of the config folder
+    config_path: str = "include/layers"
+    # Config file extension
+    config_extension: str = "h"
+
+    def export(self, export_folder: str):
         """Define how to export the node definition.
         """
-        pass
+        if self.config_template is None:
+            raise ValueError("config_template has not been defined")
+        if self.include_list is None:
+            raise ValueError("include_list has not been defined")
+        if self.kernels_to_copy is None:
+            raise ValueError("kernels_to_copy has not been defined")
 
-    @abstractmethod
-    def forward(self, list_actions:list):
+        kernel_include_list = []
+        for kernel in self.kernels_to_copy:
+            kernel_path = Path(kernel)
+            code_generation.copy_file(
+                kernel_path,
+                str(export_folder / self.kernels_path)
+            )
+            kernel_include_list.append(
+                self.kernels_path + "/" + kernel_path.stem + kernel_path.suffix)
+
+        if self.config_template != "":
+            path_to_definition = f"{self.config_path}/{self.attributes['name']}.{self.config_extension}"
+            code_generation.generate_file(
+                str(export_folder / path_to_definition),
+                self.config_template,
+                **self.attributes
+            )
+            kernel_include_list.append(path_to_definition)
+
+        return self.include_list + kernel_include_list
+
+    def forward(self):
         """Define how to generate code to perform a forward pass.
         """
-        pass
-
+        if self.forward_template is None:
+            raise ValueError("forward_template have not been defined")
+        forward_call: str = code_generation.generate_str(
+            self.forward_template,
+            **self.attributes
+        )
+        return [forward_call]
diff --git a/aidge_core/export_utils/scheduler_export.py b/aidge_core/export_utils/scheduler_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..6829832feda7af7b2c808df4cd430fc77b37b3cb
--- /dev/null
+++ b/aidge_core/export_utils/scheduler_export.py
@@ -0,0 +1,152 @@
+import aidge_core
+import os
+import shutil
+from pathlib import Path
+from aidge_core.export_utils import ExportLib, generate_file, copy_file
+from typing import List, Tuple
+
+
+def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None) -> None:
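+        """Export the graph scheduled by ``scheduler`` as a standalone C++ forward function.
+
+        :param scheduler: Aidge scheduler whose static scheduling is exported.
+        :param export_folder_path: Folder in which the export is generated.
+        :param export_lib: Optional :py:class:`ExportLib`; if provided, it is set as the backend of every node and its static files are copied into the export.
+        :param memory_manager: Callable returning ``(peak_mem, mem_info)`` for the scheduler (required, no default yet).
+        :param memory_manager_args: Extra keyword arguments forwarded to ``memory_manager``.
+        """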
+        graphview = scheduler.graph_view()
+        export_folder = Path().absolute() / export_folder_path
+
+        os.makedirs(str(export_folder), exist_ok=True)
+
+        dnn_folder = export_folder / "dnn"
+        os.makedirs(str(dnn_folder), exist_ok=True)
+
+        if memory_manager_args is None:
+            memory_manager_args = {}
+
+        if memory_manager is None:
+            raise ValueError("A memory manager is required (no default value yet).")
+        peak_mem, mem_info = memory_manager(
+            scheduler, **memory_manager_args)
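+        # mem_info maps each scheduled node to a list of per-output dictionaries
+        # (containing at least "size" and "offset"), see aidge_core.mem_info.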
+
+        # List of function calls for forward.cpp
+        list_actions: List[str] = []
+        # List of headers for forward.cpp
+        list_configs: List[str] = []
+
+        inputs_name: List[str] = []
+        inputs_dtype: List[str] = []
+        outputs_name: List[str] = []
+        outputs_dtype: List[str] = []
+        outputs_size: List[int] = []
+
+        # List of aidge_core.Node ordered by scheduler
+        list_forward_nodes: List[aidge_core.Node] = scheduler.get_static_scheduling()
+
+        # If an ExportLib is provided, use it to set the backend of every node;
+        # otherwise, rely on the backend already set on each node.
+        # if export_lib is None:
+        #     raise ValueError("Export requires an ExportLib.")
+        for node in list_forward_nodes:
+            if export_lib is not None:
+                aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].")
+                node.get_operator().set_backend(export_lib._name)
+
+            op_impl = node.get_operator().get_impl()
+            if op_impl is None:
+                raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
+            if not isinstance(op_impl, ExportLib):
+                raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
+
+            is_input:bool  = node in graphview.get_input_nodes()
+            is_output:bool = node in graphview.get_output_nodes()
+
+            if is_input:
+                # GraphView.get_input_nodes() returns the nodes that have an input set to None or coming from outside the graph.
+                # However, some inputs are optional, so such a node may not actually be an input of the graph!
+                # We therefore need to check that every input of the node is either in the graph or optional.
+                # This is what the following code block does.
+                for idx, node_in in enumerate(node.inputs()):
+                    optional:bool = node.get_operator().input_category(idx) == aidge_core.InputCategory.OptionalData
+                    # Note: node_in is a Tuple(Node, out_idx)
+                    in_graph:bool = node_in[0] in graphview.get_nodes()
+                    is_input &= (in_graph or not optional)
+
+            # Get operator current specs
+            required_specs = op_impl.get_required_spec()
+            # Get specs of the implementation that match current specs
+            specs = op_impl.get_best_match(required_specs)
+            # Retrieve said implementation
+            export_node = op_impl.get_export_node(specs)
+
+            if export_node is None:
+                raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
+            # Instantiate the ExportNode
+            op = export_node(node, mem_info[node])
+
+            # For configuration files
+            list_configs += op.export(dnn_folder)
+            # For forward file
+            list_actions += op.forward()
+            if is_input:
+                for idx, node_in in enumerate(node.inputs()):
+                    if node_in[0] not in graphview.get_nodes():
+                        inputs_name.append(op.attributes["in_name"][idx])
+                        inputs_dtype.append(
+                            op.attributes["in_cdtype"][idx]
+                        )
+            if is_output:
+                for idx in range(len(node.outputs())):
+                    outputs_name.append(op.attributes["out_name"][idx])
+                    outputs_dtype.append(
+                        op.attributes["out_cdtype"][idx]
+                    )
+                    outputs_size.append(op.attributes["out_size"][idx])
+
+        func_name = "model_forward"
+
+
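+        # Build the C function signature: inputs are passed as const pointers,
+        # outputs as pointers to pointers that the forward function fills in.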
+        args = ", ".join([f"const {dtype}* {name}" for name,
+                         dtype in zip(inputs_name, inputs_dtype)])
+        args += ", " +", ".join([f"{dtype}** {name}" for name,
+                          dtype in zip(outputs_name, outputs_dtype)])
+        forward_func = f"void {func_name}({args})"
+
+        ROOT = Path(__file__).resolve().parents[0]
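+        # Generate the forward function implementation from the Jinja template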
+        generate_file(
+            str(dnn_folder / "src" / "forward.cpp"),
+            str(ROOT / "templates" / "forward.jinja"),
+            func_name=func_name,
+            headers=set(list_configs),
+            actions=list_actions,
+            mem_ctype=inputs_dtype[0],  # Legacy behavior ...
+            peak_mem=peak_mem,
+            inputs_name=inputs_name,
+            inputs_dtype=inputs_dtype,
+            outputs_name=outputs_name,
+            outputs_dtype=outputs_dtype
+        )
+
+        # Generate dnn API
+        generate_file(
+            str(dnn_folder / "include" / "forward.hpp"),
+            str(ROOT / "templates" / "forward_header.jinja"),
+            libraries=[],
+            func_name=func_name,
+            inputs_name=inputs_name,
+            inputs_dtype=inputs_dtype,
+            outputs_name=outputs_name,
+            outputs_dtype=outputs_dtype
+        )
+
+        if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
+            raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
+
+        generate_file(
+            str(export_folder / "main.cpp"),
+            str(ROOT / "templates" / "main.jinja"),
+            func_name=func_name,
+            inputs_name=inputs_name,
+            outputs_name=outputs_name,
+            outputs_dtype=outputs_dtype,
+            outputs_size=outputs_size
+        )
+
+        if export_lib is not None:
+            # Copy all static files in the export
+            for source, destination in export_lib.static_files.items():
+                copy_file(source, str(export_folder / destination))
diff --git a/aidge_core/export_utils/templates/c_data.jinja b/aidge_core/export_utils/templates/c_data.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..3709379e5c5417233341837d853cc6f68872a194
--- /dev/null
+++ b/aidge_core/export_utils/templates/c_data.jinja
@@ -0,0 +1,6 @@
+{#- For libraries #}
+#include <stdint.h>
+ {# Design header of the array -#}
+static const {{ data_t }} {{ name }}[{{ dims |join("*") }}] __attribute__((section("nn_data"))) = {
+{{ values |join(", ") }}
+};
diff --git a/aidge_core/export_utils/templates/forward.jinja b/aidge_core/export_utils/templates/forward.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..aec5867b132e0ece9a8a39d9cf5daadeb25ea24a
--- /dev/null
+++ b/aidge_core/export_utils/templates/forward.jinja
@@ -0,0 +1,37 @@
+
+#include <stdint.h>
+
+#ifdef SAVE_OUTPUTS
+#include <sys/types.h>
+#include <sys/stat.h>
+#endif
+
+#include "include/forward.hpp"
+
+// Layer & memory configurations
+{%- for header in headers %}
+#include "{{ header }}"
+{%- endfor %}
+
+// Memory block
+static {{mem_ctype}} mem[{{peak_mem}}];
+
+{# Forward function #}
+{#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
+void {{ func_name }} (
+    {%- for i in range(inputs_name | length) -%}
+    const {{ inputs_dtype[i] }}* {{ inputs_name[i] }},
+    {%- endfor -%}
+    {%- for o in range(outputs_name | length) -%}
+    {{ outputs_dtype[o] }}** {{ outputs_name[o] }}_ptr{% if not loop.last %}, {% endif %}
+    {%- endfor -%})
+{
+
+    {%- for action in actions %}
+    {{ action }}
+    {%- endfor %}
+
+    {%- for output_name in outputs_name %}
+    *{{ output_name }}_ptr = {{ output_name }};
+    {%- endfor %}
+}
diff --git a/aidge_core/export_utils/templates/forward_header.jinja b/aidge_core/export_utils/templates/forward_header.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..574f5323866786f3b6d6f98af53b8ccf3d9975b3
--- /dev/null
+++ b/aidge_core/export_utils/templates/forward_header.jinja
@@ -0,0 +1,25 @@
+#ifndef DNN_HPP
+#define DNN_HPP
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+{#- For libraries #}
+{% for lib in libraries %}
+#include <{{ lib }}>
+{%- endfor %}
+
+void {{ func_name }} (
+    {%- for i in range(inputs_name | length) -%}
+    const {{ inputs_dtype[i] }}* {{ inputs_name[i] }},
+    {%- endfor -%}
+    {%- for o in range(outputs_name | length) %}
+    {{ outputs_dtype[o] }}** {{ outputs_name[o] }}{% if not loop.last %}, {% endif %}
+    {%- endfor -%});
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* DNN_HPP */
diff --git a/aidge_core/export_utils/templates/main.jinja b/aidge_core/export_utils/templates/main.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d0c22719a5d9b9eaa15d3ef9ef86307a060b54be
--- /dev/null
+++ b/aidge_core/export_utils/templates/main.jinja
@@ -0,0 +1,40 @@
+
+#include <iostream>
+#include "forward.hpp"
+{% for name in inputs_name %}
+#include "{{ name }}.h"
+{% endfor %}
+
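+{#- Map each C data type to its printf format specifier -#}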
+{% set printf_formats = {
+    "double": "%lf",
+    "float": "%f",
+    "int8_t": "%hhd",
+    "int16_t": "%hd",
+    "int32_t": "%d",
+    "int64_t": "%lld",
+    "uint8_t": "%hhu",
+    "uint16_t": "%hu",
+    "uint32_t": "%u",
+    "uint64_t": "%llu"
+} %}
+
+int main()
+{
+    // Initialize the output arrays
+    {%- for o in range(outputs_name | length) %}
+    {{ outputs_dtype[o] }}* {{ outputs_name[o] }} = nullptr;
+    {% endfor %}
+
+    // Call the forward function
+    {{ func_name }}({{ inputs_name|join(", ") }}, &{{ outputs_name|join(", &") }});
+
+    // Print the results of each output
+    {%- for o in range(outputs_name | length) %}
+    printf("{{ outputs_name[o] }}:\n");
+    for (int o = 0; o < {{ outputs_size[o] }}; ++o) {
+        printf("{{ printf_formats[outputs_dtype[o]] }} ", {{ outputs_name[o] }}[o]);
+    }
+    printf("\n");
+    {% endfor %}
+    return 0;
+}
diff --git a/aidge_core/export_utils/tensor_export.py b/aidge_core/export_utils/tensor_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..43f013dd02e730ae72c89a5bbe329c4fbb8a0324
--- /dev/null
+++ b/aidge_core/export_utils/tensor_export.py
@@ -0,0 +1,40 @@
+import os
+
+from aidge_core.export_utils.code_generation import generate_file
+from aidge_core.export_utils.data_conversion import aidge2c
+from aidge_core import Tensor
+from pathlib import Path
+
+def tensor_to_c(tensor:Tensor)->str:
+    """Given a :py:class:``aigd_core.Tensor``, return a C description of the tensor.
+    For example:
+    {
+        {1, 2},
+        {3, 4}
+    }
+
+    :param tensor: Tensor to transform to a string
+    :type tensor: Tensor
+    :return: String representation of a C array
+    :rtype: str
+    """
+    return str(tensor)
+
+def generate_input_file(export_folder:str,
+                        array_name:str,
+                        tensor:Tensor):
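+    """Generate a C header ``<array_name>.h`` in ``export_folder`` containing the tensor data as a static C array."""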
+
+    # If directory doesn't exist, create it
+    if not os.path.exists(export_folder):
+        os.makedirs(export_folder)
+    print(f"gen : {export_folder}/{array_name}.h")
+    ROOT = Path(__file__).resolve().parents[0]
+    generate_file(
+        file_path=f"{export_folder}/{array_name}.h",
+        template_path=str(ROOT / "templates" / "c_data.jinja"),
+        dims = tensor.dims(),
+        data_t = aidge2c(tensor.dtype()),
+        name = array_name,
+        values = list(tensor)
+    )
+
diff --git a/aidge_core/mem_info.py b/aidge_core/mem_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f2f48dd64aba276e70940d5bf461da6f50a4b38
--- /dev/null
+++ b/aidge_core/mem_info.py
@@ -0,0 +1,119 @@
+import os
+import shutil
+from pathlib import Path
+import aidge_core
+from typing import Tuple, List, Dict
+
+
+# Default memory management, which can be used for development
+def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, Dict]:
+    """Basic memory management concatenate memory block, no memory reuse !
+
+    :param scheduler: Aidge scheduler
+    :type scheduler: :py:class:`aidge_core.Scheduler`
+    :return: The total memory size (in number of elements) and a dictionary mapping each node to a list (one entry per output) of dictionaries with ``size`` and ``offset`` keys
+    :rtype: Tuple[int, Dict]
+    """
+    mem_info = {}
+    mem_size = 0
+
+    # Exclude Producers (their outputs are stored outside the export memory buffer)
+    for i, node in enumerate(scheduler.get_static_scheduling()):
+        if node.type() != "Producer":
+            node_mem_info = []
+            for out_id in range(node.get_nb_outputs()):
+                dims = node.get_operator().get_output(out_id).dims()
+                mem = 1
+                for dim in dims:
+                    mem *= dim
+
+                # Add memory info
+                node_mem_info.append({
+                    "size": mem,
+                    "offset": mem_size
+                })
+
+                # Increment offset for the next layer
+                mem_size += mem
+            mem_info[node] = node_mem_info
+        else:
+            mem_info[node] = [] # No meminfo for producer
+    return mem_size, mem_info
+
+
+def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path, wrapping: bool = False) -> Tuple[int, Dict]:
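+    """Compute per-node memory information using Aidge's memory manager.
+
+    :param scheduler: Aidge scheduler (forward dims and scheduling must already be computed).
+    :param stats_folder: Folder where the memory usage plot is written.
+    :param wrapping: Enable wrap-around buffers in the memory manager.
+    :return: The peak memory usage and a dictionary mapping each node to a list of per-output memory info dictionaries.
+    :rtype: Tuple[int, Dict]
+    """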
+    # forward_dims() must have been called outside this function.
+    # The scheduling is also assumed to have been generated beforehand;
+    # otherwise, uncomment the following line:
+    # scheduler.generate_scheduling()
+    # Generate the memory manager.
+    # So far, Producers are not taken into account by the memory manager => inc_producers=False
+    mem_manager = scheduler.generate_memory(
+        inc_producers=False, wrap_around_buffer=wrapping)
+
+    # List of nodes which are connected at the input of the graph (None if input is not connected)
+    nodes_at_input = [n[0] for n in scheduler.graph_view().inputs()]
+    # Use gnuplot to generate the log
+    if isinstance(stats_folder, str):
+        stats_folder = Path(stats_folder)
+    os.makedirs(str(Path(stats_folder) / "graph"), exist_ok=True)
+    mem_manager.log("memory_info")
+    os.chmod("memory_info_plot.gnu", 0o777)
+    os.system("./memory_info_plot.gnu")
+    shutil.move("memory_info", str(Path(stats_folder) / "graph" / "memory_info"))
+    shutil.move("memory_info_plot.png", str(
+        Path(stats_folder) / "graph" / "memory_info_plot.png"))
+    os.remove("memory_info_plot.gnu")
+    # In the export, we currently use a unified memory buffer whose size
+    # is determined by the peak memory usage
+    mem_size = mem_manager.get_peak_usage()
+    mem_info = {}
+
+    mem_planes = mem_manager.get_planes()
+
+    for node in scheduler.get_static_scheduling():
+        node_mem_info = []
+        if node.type() == "Producer":
+            pass
+        elif node in nodes_at_input:
+            # Input memory management (assumes the tensor layout ends with [:, channel, height, width])
+            tensor = node.get_operator().get_output(0)
+            if tensor is None:
+                raise RuntimeError("Warning input producer not provided")
+            if len(tensor.dims()) < 3:
+                raise RuntimeError(
+                    f"Input producer dimensions must be with [:, channel, height, width] but got {tensor.dims()} instead")
+            # TODO: use the get_chan, get_height and get_width functions!
+            node_mem_info.append({
+                    "size": tensor.dims()[-3],          # Should be nb_channels
+                    "offset": 0,                        # Suppose input data is stored outside the export function
+                                                        # so the memory offset is not important to consider
+                    "stride": tensor.dims()[-3],        # Should be nb_channels
+                    "length": tensor.dims()[-1],        # Should be width
+                    "count":  tensor.dims()[-2],        # Should be height
+                    "cont_offset": 0,                   # Suppose input data is stored outside the export function
+                                                        # so the memory offset is not important to consider
+                    "cont_size": tensor.dims()[-1] * \
+                                 tensor.dims()[-2] * \
+                                 tensor.dims()[-3],     # Size of input
+                    "wrap_offset": 0,                   # No wrapping
+                    "wrap_size": 0                      # No wrapping
+                })
+        else:
+            for out_id in range(node.get_nb_outputs()):
+                plane = mem_planes[node][out_id]
+                node_mem_info.append({
+                    "size": plane.size,
+                    "offset": plane.get_contiguous_offset(),
+                    "stride": plane.stride,
+                    "length": plane.length,
+                    "count": plane.count,
+                    "cont_offset": plane.get_contiguous_offset(),
+                    "cont_size": plane.get_contiguous_size(),
+                    "wrap_offset": plane.get_wrapped_offset(),
+                    "wrap_size": plane.get_wrapped_size()
+                })
+        mem_info[node] = node_mem_info
+    return mem_size, mem_info
+
+
diff --git a/aidge_core/show_graphview.py b/aidge_core/show_graphview.py
index ddf0fc4b4659a727c7879738ef5e3eb40186cac1..633298f10dbfdafe40022f88f741f82d2d35c681 100644
--- a/aidge_core/show_graphview.py
+++ b/aidge_core/show_graphview.py
@@ -4,24 +4,24 @@ import builtins
 import aidge_core
 import numpy as np
 from pathlib import Path
- 
+
 def _retrieve_operator_attrs(node : aidge_core.Node) -> dict[str, int, float, bool, None]:
     """
     Returns the dictionary containing the attributes of a given Node.
 
-    :param graph: A Node in the list of ordered nodes. 
+    :param graph: A Node in the list of ordered nodes.
     :type graph: aidge_core.Node
 
     :return: A dictionary with the Node's attributes.
     :rtype: dict[str, int, float, bool, None]
-    """       
+    """
 
     if node.get_operator().attr is not None:
         node_attr_dict =  node.get_operator().attr.dict()
         for key,value in node_attr_dict.items():
             if not type(value).__name__ in dir(builtins):
                 node_attr_dict[key] = value.name
-    
+
     else:
         node_attr_dict = {}
 
@@ -29,49 +29,49 @@ def _retrieve_operator_attrs(node : aidge_core.Node) -> dict[str, int, float, bo
 
 def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_embed : bool, write_trainable_params_ext : bool, path_trainable_params : Path, params_file_format : str) -> dict[str, int, float, bool, None]:
     """
-    Creates a dictionary to store the information of a given ordered GraphView. 
+    Creates a dictionary to store the information of a given ordered GraphView.
 
     :param ordered_nodes: A list with the GraphView's ordered nodes.
     :type graph: list
-    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed). 
+    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed).
     :type write_trainable_params_embed: bool
-    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file. 
+    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file.
     :type write_trainable_params_ext: bool
     :param path_trainable_params: Path of the external file used to store the Nodes' trainable parameters.
     :type path_trainable_params: Path
     :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``.
     :type params_file_format: str
-    
+
     :return: A dictionary with the GraphView description.
     :rtype: dict[str, int, float, bool, None]
-    """            
+    """
 
     graphview_dict = {'graph': []}
 
     for node in ordered_nodes:
-        
+
         if node is not None:
-            node_dict = {'name' : node.name(), 
+            node_dict = {'name' : node.name(),
                          'optype' : node.get_operator().type(),
                          'nb_inputs' : node.get_operator().nb_inputs(),
                          'nb_outputs' : node.get_operator().nb_outputs()}
-            
+
             inputs = []
             for input_idx in range(node.get_operator().nb_inputs()):
                 input_dict = {'dims' : node.get_operator().get_input(input_idx).dims(),
                               'data_type' : str(node.get_operator().get_input(input_idx).dtype()),
-                              'data_format' : str(node.get_operator().get_input(input_idx).dformat())}              
-                inputs.append(input_dict)    
-            
+                              'data_format' : str(node.get_operator().get_input(input_idx).dformat())}
+                inputs.append(input_dict)
+
             node_dict['inputs'] = inputs
 
             outputs = []
             for output_idx in range(node.get_operator().nb_outputs()):
                 output_dict = {'dims' : node.get_operator().get_output(output_idx).dims(),
                                'data_type' : str(node.get_operator().get_output(output_idx).dtype()),
-                              'data_format' : str(node.get_operator().get_output(output_idx).dformat())}              
-                outputs.append(output_dict)    
-            
+                              'data_format' : str(node.get_operator().get_output(output_idx).dformat())}
+                outputs.append(output_dict)
+
             node_dict['outputs'] = outputs
 
             parents = node.get_parents()
@@ -79,8 +79,8 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
                 if parents[0] is None: parents.append(parents.pop(0))
             else:
                 pass
-    
-            parents_inputs = [] 
+
+            parents_inputs = []
             for parent in parents:
                 if parent is not None:
                     for output_idx in range(parent.get_operator().nb_outputs()):
@@ -91,7 +91,7 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
                 elif parent is None:
                     for input_idx in list(range(node.get_operator().nb_inputs())):
                         if input_idx not in [item[1] for item in parents_inputs]:
-                                parents_inputs.append((None, input_idx))  
+                                parents_inputs.append((None, input_idx))
 
             parents_inputs.sort(key=lambda x: x[1])
             node_dict['parents'] = parents_inputs
@@ -103,15 +103,15 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
                         if child.get_operator().get_input(input_idx).dims() == node.get_operator().get_output(output_idx).dims():
                             children_outputs.append((child.name(), output_idx))
             node_dict['children'] = children_outputs
-        
+
             # Check if my node is a metaop
             attributes_dict = {}
-            if isinstance(node.get_operator(), aidge_core.MetaOperator_Op):
+            if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
                 attributes_dict['micro_graph'] = []
                 for micro_node in node.get_operator().get_micro_graph().get_nodes():
-                    micro_node_dict = {'name' : micro_node.name(), 
+                    micro_node_dict = {'name' : micro_node.name(),
                                         'optype' : micro_node.type()}
-                    
+
                     micro_node_attr_dict =  _retrieve_operator_attrs(micro_node)
                     micro_node_dict['attributes'] = micro_node_attr_dict
                     attributes_dict['micro_graph'].append(micro_node_dict)
@@ -124,7 +124,7 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
 
             if node.type() == 'Producer':
                 if write_trainable_params_ext:
-                    
+
                     params_file_format.casefold()
 
                     if params_file_format=='npz':
@@ -134,14 +134,14 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
                     elif params_file_format=='json':
                         tensor = np.array(node.get_operator().get_output(0))
                         tensor_dict = {
-                            node.name() : 
+                            node.name() :
                             {
                                 'dims' : tensor.shape,
                                 'data_type' : str(tensor.dtype),
                                 'tensor_data' : tensor.tolist()
-                            }   
+                            }
                         }
-                                   
+
                         with open(Path(path_trainable_params, node.name() + '.json'), 'w') as fp:
                             json.dump(tensor_dict, fp, indent=4)
 
@@ -150,10 +150,10 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
                     else:
                         raise Exception("File format to write trainable parameters not recognized.")
 
-                
+
                 elif write_trainable_params_embed:
                     node_dict['tensor_data'] = np.array(node.get_operator().get_output(0)).tolist()
-                
+
                 else:
                     pass
 
@@ -161,13 +161,13 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
 
         else: # node is None
             pass
-    
+
     return graphview_dict
 
 def _write_dict_json(graphview_dict : dict[str, int, float, bool, None], json_path : str) -> None:
     """
     Writes dictionary containing GraphView description to a JSON file.
-    
+
     :param graphview_dict: A dictionary with the GraphView description.
     :type graphview_dict: dict[str, int, float, bool, None]
     :param json_path: Path to write JSON file.
@@ -178,18 +178,18 @@ def _write_dict_json(graphview_dict : dict[str, int, float, bool, None], json_pa
         json.dump(graphview_dict, fp, indent=4)
 
     return None
-    
-def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainable_params_embed : bool = False, write_trainable_params_ext : bool = False, params_file_format : str = 'json') -> None:   
+
+def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainable_params_embed : bool = False, write_trainable_params_ext : bool = False, params_file_format : str = 'json') -> None:
     """
     Generates the description for a GraphView in the JSON format.
-    
+
     :param graph: A GraphView of Aidge.
     :type graph: aidge_core.GraphView
     :param json_path: Path to write JSON file.
     :type json_path: Path
-    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed). 
+    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed).
     :type write_trainable_params_embed: bool, optional
-    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file. 
+    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file.
     :type write_trainable_params_ext: bool, optional
     :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``.
     :type params_file_format: str, optional
@@ -201,7 +201,7 @@ def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainabl
     elif not json_path.is_dir():
         if json_path.suffix == '.json':
             pass
-        else: 
+        else:
             raise Exception('If ``json_path`` contains a filename it must be of JSON format.')
 
     if write_trainable_params_ext:
@@ -212,14 +212,14 @@ def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainabl
     if isinstance(gview, aidge_core.GraphView):
         # Sort GraphView in topological order
         ordered_nodes = gview.get_ordered_nodes()
-    
-        # Create dict from GraphView 
+
+        # Create dict from GraphView
         graphview_dict = _create_dict(ordered_nodes, write_trainable_params_embed, write_trainable_params_ext, path_trainable_params, params_file_format)
-        
+
         # Write dict to JSON
         _write_dict_json(graphview_dict, json_path)
 
     else:
         raise Exception("Graph must be an instance of aidge_core.GraphView.")
-        
+
     return None
\ No newline at end of file
diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py
index 57a15586873fae10beb428d0cd3cb41267a45d2f..d98a6fdbc20dd7e99169422205a4e680350aed27 100644
--- a/aidge_core/unit_tests/test_export.py
+++ b/aidge_core/unit_tests/test_export.py
@@ -80,6 +80,7 @@ class test_export(unittest.TestCase):
         )
 
         initFiller(model)
+        model.forward_dims([[1, 32*32*3]])
 
         # Preserve previously generated build if present
         tree_move(self.BUILD_DIR, self.TMP_BUILD_DIR, ignore_missing=True, exist_ok=True)
@@ -87,7 +88,7 @@ class test_export(unittest.TestCase):
         tree_remove(self.INSTALL_DIR, ignore_missing=True)
 
         # Export model
-        aidge_core.export(self.EXPORT_PATH, model)
+        aidge_core.serialize_to_cpp(self.EXPORT_PATH, model)
         self.assertTrue(self.EXPORT_PATH.is_dir(), "Export folder has not been generated")
         # Add other source files
         shutil.copyfile(pathlib.Path(__file__).parent / "static/main.cpp", self.EXPORT_PATH / "main.cpp")
diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 26d60f2fbaf0f3903baf191cf0a2ad5550fb3275..4d3b0a34485e7e0d7e0323f2fc121a83b1882de9 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -49,8 +49,8 @@ class test_OperatorImpl(unittest.TestCase):
         """Test registering an implementation
         """
         global GLOBAL_CPT
-        aidge_core.register_ConvOp2D("cpu", testImpl)
-        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        aidge_core.register_Conv2DOp("cpu", testImpl)
+        self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp())
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         conv.get_operator().set_backend("cpu")
         conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
@@ -61,9 +61,9 @@ class test_OperatorImpl(unittest.TestCase):
         """Test registering an implementation
         """
         global GLOBAL_CPT
-        aidge_core.register_ConvOp2D("cpu", testImpl)
+        aidge_core.register_Conv2DOp("cpu", testImpl)
         aidge_core.register_ProducerOp("cpu", testImpl)
-        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp())
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         model = aidge_core.sequential([conv])
         model.set_backend("cpu")
diff --git a/aidge_core/unit_tests/test_recipes.py b/aidge_core/unit_tests/test_recipes.py
index c8dd4c727fbaf8224e8d04111a5054caeb5e5c99..f4dd0220ecdc5950e1b1dcef0d8bf2d4782216bf 100644
--- a/aidge_core/unit_tests/test_recipes.py
+++ b/aidge_core/unit_tests/test_recipes.py
@@ -46,9 +46,9 @@ class test_recipes(unittest.TestCase):
 
     def test_fuse_matmul_add(self):
         matmul0 = aidge_core.MatMul(name="MatMul0")
-        add0 = aidge_core.Add(2, name="Add0")
+        add0 = aidge_core.Add(name="Add0")
         matmul1 = aidge_core.MatMul(name="MatMul1")
-        add1 = aidge_core.Add(2, name="Add1")
+        add1 = aidge_core.Add(name="Add1")
         w0 = aidge_core.Producer([1, 1], name="W0")
         w0.add_child(matmul0, 0, 0)
         b0 = aidge_core.Producer([1], name="B0")
diff --git a/aidge_core/unit_tests/test_topological_order.py b/aidge_core/unit_tests/test_topological_order.py
index 8e7f2e2d9b9770c2fae1e5c2812ba33113589134..01a69409e86c486ec2fb8c8bdb2a18ab0e3d9c1c 100644
--- a/aidge_core/unit_tests/test_topological_order.py
+++ b/aidge_core/unit_tests/test_topological_order.py
@@ -29,7 +29,7 @@ class test_topological_order(unittest.TestCase):
         loop0.get_operator().set_back_edges({1})
         assert not loop0.get_operator().is_back_edge(0)
         assert loop0.get_operator().is_back_edge(1)
-        add0 = aidge_core.Add(2, "add0")
+        add0 = aidge_core.Add("add0")
 
         loop0.add_child(add0, 0, 1)
         add0.add_child(loop0, 0, 1)
@@ -50,7 +50,7 @@ class test_topological_order(unittest.TestCase):
         loop0.get_operator().set_back_edges({0})
         assert not loop0.get_operator().is_back_edge(1)
         assert loop0.get_operator().is_back_edge(0)
-        add0 = aidge_core.Add(2, "add0")
+        add0 = aidge_core.Add("add0")
 
         loop0.add_child(add0, 0, 1)
         add0.add_child(loop0, 0, 0)
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 4af7da64ebca3c02eb9aabca1f2dad88fd8b9829..649898dd130d5811f65f65af87bc117d3502647c 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -28,7 +28,7 @@ class Operator;
 
 /**
  * @brief ImplSpec stores the requirements or the specifications of an implementation.
- * 
+ *
  */
 struct ImplSpec {
     struct IOSpec {
@@ -73,10 +73,15 @@ inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) {
         || (lhs.inputs == rhs.inputs && lhs.outputs == rhs.outputs && lhs.attrs < rhs.attrs);
 }
 
+
+inline bool operator==(const ImplSpec& lhs, const ImplSpec& rhs) {
+    return !(lhs < rhs) && !(rhs < lhs);
+}
+
 /**
  * @brief Impl stores the details of a specific implementation.
  * It is associated to a ImplSpec in a registry.
- * 
+ *
  */
 template <class FwdFunc, class BwdFunc>
 struct Impl {
@@ -108,7 +113,7 @@ public:
     /**
      * @brief Get the operator required implementation specification, according
      * to the current operator configuration.
-     * 
+     *
      */
     ImplSpec getRequiredSpec() const;
 
@@ -116,15 +121,15 @@ public:
      * @brief Get the best implementation that matches \p requiredSpecs.
      * If no implementation matches \p requiredSpecs, \p requiredSpecs is
      * returned.
-     * 
+     *
      */
     ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Get an adapted meta operator corresponding to the required 
+     * @brief Get an adapted meta operator corresponding to the required
      * specifications \p requiredSpecs from the implementation specifications
      * \p spec.
-     * 
+     *
      * @param spec Implementation specification
      * @param requiredSpecs Required specifications
      * @return std::shared_ptr<Node> Adapted meta op or nullptr
@@ -132,12 +137,12 @@ public:
     std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Get the best adapted meta operator corresponding to the required 
+     * @brief Get the best adapted meta operator corresponding to the required
      * specifications \p requiredSpecs.
      * The best adaptation is the one with the lowest overhead cost.
-     * Currently, it is the one requiring the least number of additionnal 
+     * Currently, it is the one requiring the least number of additional
      * operators to match the available implementations.
-     * 
+     *
      * @param requiredSpecs Required specifications
      * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
@@ -147,7 +152,7 @@ public:
 
 protected:
     virtual std::shared_ptr<ProdConso> getProdConso() const;
-    virtual std::set<ImplSpec> getAvailableImplSpecs() const;
+    virtual std::vector<ImplSpec> getAvailableImplSpecs() const;
     bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const;
 
     const Operator &mOp;
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index efdb06c4ac6d0e6898d899cc639a88d1da301000..c025ad770809864ac4e2d2c38e616e3d95e3d96a 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -424,16 +424,19 @@ public:
         addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
     }
 
-    inline void updateNodeName(NodePtr nodeToRename, const std::string& newName){
-        const std::string& oldName = nodeToRename->name();
-        AIDGE_ASSERT(mNodeRegistry.find(newName) != mNodeRegistry.end(), "Name {} is already used in graph {}.", newName, name());
-
-        if (nodeToRename->name() != ""){ // Case node already had a name
-            AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", oldName, name());
-            mNodeRegistry[newName] = mNodeRegistry[oldName];
-            mNodeRegistry.erase(oldName);
-        }else{ // Case node did not had a name
-            mNodeRegistry[newName] = nodeToRename;
+    inline void updateNodeName(const std::shared_ptr<Node>& node, const std::string& newName){
+        if (!newName.empty()) {
+            auto itNew = mNodeRegistry.insert(std::make_pair(newName, node));
+            if (!itNew.second) {
+                Log::notice("Replacing existing node name in graph node name registry: {}", newName);
+                (itNew.first)->second = node;
+            }
+        }
+
+        if (!node->name().empty()) {
+            const auto it = mNodeRegistry.find(node->name());
+            AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", node->name(), name());
+            mNodeRegistry.erase(it);
         }
     }
 
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index e014b041fdad94f5f17d636a2da92180de59e152..51cc9c444edf03febf4416149e9160df0bbfca9c 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -76,7 +76,7 @@ public:
    * @param attrs Attributes for the Node.
    */
   Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs);
-  Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs);
+//   Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs);
 
   /**
    * @brief Construct a new Node object associated with the input Operator.
@@ -124,11 +124,14 @@ public:
   //        INNER
   ///////////////////////////////////////////////////////
 
+  inline std::shared_ptr<DynamicAttributes> attributes() const {
+    return mAttrs;
+  }
   /**
    * @brief Name of the Node.
    * @return std::string
    */
-  inline std::string name() const noexcept { return (mAttrs->hasAttr("name")) ? mAttrs->getAttr<std::string>("name") : ""; }
+  inline std::string name() const noexcept { return mAttrs->getAttr<std::string>("name"); }
 
   /**
    * @brief Set the Node name.
@@ -173,6 +176,7 @@ public:
    * @return std::shared_ptr<Operator>
    */
   inline std::shared_ptr<Operator> getOperator() const { return (*mOperator)(mAttrs); }
+//   inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
 
   ///////////////////////////////////////////////////////
   //        TENSOR MANAGEMENT
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index f96996079b9e89f80c78b8e409830369480705a8..827fc0c2732695364aa2393692d7040b8b1a0e9f 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -29,7 +29,7 @@ class Add_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Add_Op(const IOIndex_t nbIn);
+    Add_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -66,7 +66,7 @@ public:
     }
 };
 
-std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "");
+std::shared_ptr<Node> Add(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
diff --git a/include/aidge/operator/Atan.hpp b/include/aidge/operator/Atan.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f9c7d09e7a49cd8687166006eea75510aeda57ee
--- /dev/null
+++ b/include/aidge/operator/Atan.hpp
@@ -0,0 +1,52 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ATAN_H_
+#define AIDGE_CORE_OPERATOR_ATAN_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Atan_Op : public OperatorTensor,
+    public Registrable<Atan_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Atan_Op&)>> {
+public:
+    static const std::string Type;
+
+    Atan_Op();
+
+    Atan_Op(const Atan_Op& op);
+
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> Atan(const std::string& name = "");
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ATAN_H_ */
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 744dbd1327a83267b7840e03ba83190326ee6cdd..b915cb8f16546e6626e99e41f5f9ebb1c038863e 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -28,7 +28,7 @@
 
 namespace Aidge {
 class MetaOperator_Op : public OperatorTensor,
-                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)>> {
+                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::shared_ptr<OperatorImpl>(const MetaOperator_Op &)>> {
 public:
     // outputs shared with micro-graph output Tensors
     // Micro-graph handling:
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 750a808aaeb23447578501f8b27c7eba3d34234c..481a7795e24acd006acfc66a0fccde1b8da747e7 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -126,7 +126,7 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
         MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
     });
 
-    return MetaOperator("PaddedMaxPooling", graph, {}, name);
+    return MetaOperator(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
@@ -140,7 +140,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<Dim
         MaxPooling(kernel_dims, "", stride_dims, ceil_mode)
     });
 
-    return std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph);
+    return std::make_shared<MetaOperator_Op>(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph);
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 2c670bf23d4703a5a9e8502c8b356fdde32e2561..bc1852ec0759ffaafa015143f22b0a1c8f6c893e 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -21,6 +21,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -47,7 +48,7 @@ public:
     Pad_Op() = delete;
 
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                     const PadBorderType &borderType = PadBorderType::Constant,
+                     PadBorderType borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
@@ -92,7 +93,7 @@ public:
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                         const std::string& name = "",
-                        const PadBorderType &borderType = PadBorderType::Constant,
+                        PadBorderType borderType = PadBorderType::Constant,
                         double borderValue = 0.0);
 
 // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
@@ -100,7 +101,7 @@ template <DimSize_t DIM>
 inline std::shared_ptr<Node> Pad(
     DimSize_t const (&beginEndTuples)[2*DIM],
     const std::string& name = "",
-    const PadBorderType &borderType = PadBorderType::Constant,
+    PadBorderType borderType = PadBorderType::Constant,
     double borderValue = 0.0)
 {
     return Pad<DIM>(to_array(beginEndTuples), name, borderType, borderValue);
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 9b264c1d3d7955f71538dd90f105cfd7ee469d0a..a9a84a3ee80eea5c0032fa08bce4ab96c44dba04 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -25,16 +25,34 @@
 
 namespace Aidge {
 
-class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> {
+/**
+ * @brief Description of an element-wise Rectified Linear Unit (ReLU) operation
+ * on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = max(0, x)`
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class ReLU_Op :
+    public OperatorTensor,
+    public Registrable<ReLU_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>>
+{
 public:
     static const std::string Type;
 
     ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op ReLU_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     ReLU_Op(const ReLU_Op& op);
 
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index 24bc3321673f4dcffd3e3663f7e0a0e584389492..ceef45bf0a12315354c8b7bf378c2da834b867b1 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class Sigmoid_Op : public OperatorTensor,
-    public Registrable<Sigmoid_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)>> {
+    public Registrable<Sigmoid_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sigmoid_Op&)>> {
 public:
     static const std::string Type;
 
@@ -50,4 +50,4 @@ public:
 std::shared_ptr<Node> Sigmoid(const std::string& name = "");
 }
 
-#endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
index a7c0ed5ae73d1f891744e835f0da5ad14a37f850..fce8d7f6548aaeb04300291d33cc2a5e44fb6fe7 100644
--- a/include/aidge/scheduler/ProdConso.hpp
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -42,10 +42,14 @@ public:
      */
     virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
-    // Amount of input data that cannot be overwritten during the execution.
+    /**
+     * @brief Amount of input data that cannot be overwritten during the execution.
+     */
     virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
 
-    // Memory required at an output for a given input size.
+    /**
+     * @brief Memory required at an output for a given input size.
+     */
     virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
 
     /**
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 04172c3ff68641a9fe0d14f9a326cd17e7002912..49e45ed7e447c00cf7300e8228ee7d1b04800083 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -55,9 +55,9 @@ public:
         );
     }
 
-    std::set<ImplSpec> getAvailableImplSpecs() const noexcept override {
+    std::vector<ImplSpec> getAvailableImplSpecs() const noexcept override {
         PYBIND11_OVERRIDE_NAME(
-            std::set<ImplSpec>,
+            std::vector<ImplSpec>,
             OperatorImpl,
             "get_available_impl_specs",
             getAvailableImplSpecs
@@ -81,6 +81,13 @@ void init_OperatorImpl(py::module& m){
     .def(py::init<const DynamicAttributes&>(), py::arg("attr") = DynamicAttributes())
     .def(py::init<const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("io"), py::arg("attr") = DynamicAttributes())
     .def(py::init<const ImplSpec::IOSpec&, const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("i"), py::arg("o"), py::arg("attr") = DynamicAttributes())
+    .def("__eq__", static_cast<bool(*)(const ImplSpec&, const ImplSpec&)>(&operator==))
+    .def("__repr__", [](ImplSpec self){
+        return fmt::format("{}\n", self);
+    })
+    .def_readwrite("inputs", &ImplSpec::inputs)
+    .def_readwrite("outputs", &ImplSpec::outputs)
+    .def_readwrite("attrs", &ImplSpec::attrs)
     ;
 
     py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
@@ -98,4 +105,4 @@ void init_OperatorImpl(py::module& m){
     .def("get_available_impl_specs", &OperatorImpl_Publicist::getAvailableImplSpecs)
     ;
 }
-}
+} // namespace Aidge
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index e91f345d7974cb06aa7aec9e27300b9cf9230985..b1e879d8387a71a3819ee7e0f8bbcd1e9936c146 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -62,5 +62,9 @@ void init_Data(py::module& m){
 
     py::class_<Data, std::shared_ptr<Data>>(m,"Data");
 
+
+    m.def("format_as", (const char* (*)(DataType)) &format_as, py::arg("dt"));
+    m.def("format_as", (const char* (*)(DataFormat)) &format_as, py::arg("df"));
+
 }
 }
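
A hedged C++ counterpart of the two helpers exposed above, assuming format_as() for both enums is declared alongside DataType/DataFormat in aidge/data/Data.hpp:

    #include <fmt/core.h>
    #include "aidge/data/Data.hpp"  // assumed to declare DataType, DataFormat and format_as()

    using namespace Aidge;

    void printEnumNames() {
        // The same enum -> printable-name lookups the new Python bindings expose.
        fmt::print("{}\n", format_as(DataType::Float32));
        fmt::print("{}\n", format_as(DataFormat::NHWC));
    }
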
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 2feaa1f8b8ecd50e1f2570107af1e62fc4f1f457..7e29cbb04f63bf99d86f63004dfede452a7a8ce0 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -331,6 +331,8 @@ void init_Tensor(py::module& m){
     .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("undefined", &Tensor::undefined)
+    .def("cpy_transpose", (void (Tensor::*)(const Tensor& src, const std::vector<DimSize_t>& transpose)) &Tensor::copyTranspose, py::arg("src"), py::arg("transpose"))
+
     .def("__str__", [](Tensor& b) {
         if (b.empty()) {
             return std::string("{}");
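
The new cpy_transpose binding maps onto the C++ member with exactly the signature used in the cast above. A minimal sketch of that call; the axis permutation chosen here is only an example:

    #include <vector>
    #include "aidge/data/Tensor.hpp"

    using namespace Aidge;

    void toNHWC(Tensor& dst, const Tensor& src) {
        // Copy src into dst while permuting the axes (NCHW -> NHWC here).
        dst.copyTranspose(src, std::vector<DimSize_t>{0, 2, 3, 1});
    }
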
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index cd9b2a16f92a4e7ccd2a0f2f17e605a6b049c752..febb6f2ed594174a7aeef60f26b8f9a5ee0e23e3 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -30,6 +30,8 @@ void init_GraphView(py::module& m) {
           :param path: save location
           :type path: str
           )mydelimiter")
+          .def("inputs", (std::vector<std::pair<NodePtr, IOIndex_t>> (GraphView::*)() const) &GraphView::inputs)
+          .def("outputs", (std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> (GraphView::*)() const) &GraphView::outputs)
           .def("in_view", (bool (GraphView::*)(const NodePtr&) const) &GraphView::inView)
           .def("in_view", (bool (GraphView::*)(const std::string&) const) &GraphView::inView)
           .def("root_node", &GraphView::rootNode)
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 8a00a1cb4a419f1125411b5b1c823bf91570d62e..f8adfd5f4becb7677b3a59791f8549bb114fbbc4 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -22,14 +22,14 @@ namespace Aidge {
 
 void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
-    .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
+    .def(py::init<>())
     .def_static("get_inputs_name", &Add_Op::getInputsName)
     .def_static("get_outputs_name", &Add_Op::getOutputsName)
     .def_readonly_static("Type", &Add_Op::Type);
 
   declare_registrable<Add_Op>(m, "AddOp");
 
-  m.def("Add", &Add, py::arg("nb_inputs"), py::arg("name") = "");
+  m.def("Add", &Add, py::arg("name") = "");
 }
 
 void init_Add(py::module &m) {
diff --git a/python_binding/operator/pybind_Atan.cpp b/python_binding/operator/pybind_Atan.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e9e277fcc336b8d4436aaf8dc1a834e666e400ae
--- /dev/null
+++ b/python_binding/operator/pybind_Atan.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Atan.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Atan(py::module& m) {
+    py::class_<Atan_Op, std::shared_ptr<Atan_Op>, OperatorTensor>(m, "AtanOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Atan_Op::getInputsName)
+    .def_static("get_outputs_name", &Atan_Op::getOutputsName);
+
+    declare_registrable<Atan_Op>(m, "AtanOp");
+
+    m.def("Atan", &Atan, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index b98a642111402050fd3cba6dd8a12b11a3bbde8a..2d137c4de0c62ff1f8391b03cb07bc9230b7c9eb 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -28,7 +28,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
-  const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("AvgPooling" + std::to_string(DIM) + "DOp");
   const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName);
 //   py::class_<StaticAttributes<AvgPoolingAttr,
 //                                              std::array<DimSize_t, DIM>,
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index bc72825b2161d8733334817e095c251c788e7eba..3bd54da74f90a38b0255e021234786cd21e6c8a3 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("Conv" + std::to_string(DIM) + "DOp");
   py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 377d0fca5d78dff20b8df0cc0d5521eb9a3685a2..b69fee02a50b95e4abfda895580f3192e7876766 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -27,7 +27,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("ConvDepthWise" + std::to_string(DIM) + "DOp");
   py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index b59a4c5574ce5e56af13f9aea13e7514c9402c22..00f6d26bd3ae49b26d72236b71372c1f557ddbdd 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -26,9 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("MaxPooling" + std::to_string(DIM) + "DOp");
   py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor>(
-    m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index afd682f3e546b408b231a14e55a7ba5432fef430..5f173068af0f1140830d458979ec924c38ade078 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -47,7 +47,7 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("no_bias")= false);
-    m.def(("PaddedConvOp" + std::to_string(DIM) + "D").c_str(), [](
+    m.def(("PaddedConv" + std::to_string(DIM) + "DOp").c_str(), [](
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
@@ -87,7 +87,7 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("no_bias") = false);
-  m.def(("PaddedConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(), [](
+  m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "DOp").c_str(), [](
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
@@ -121,7 +121,7 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
-  m.def(("PaddedAvgPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("PaddedAvgPooling" + std::to_string(DIM) + "DOp").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims)
     {
@@ -152,7 +152,7 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("ceil_mode") = false);
-  m.def(("PaddedMaxPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("PaddedMaxPooling" + std::to_string(DIM) + "DOp").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          bool ceil_mode)
@@ -194,7 +194,7 @@ void init_MetaOperatorDefs(py::module &m) {
 //   declare_PaddedMaxPoolingOp<3>(m);
   declare_LSTMOp(m);
 
-  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance())
+  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperatorOp", py::multiple_inheritance())
   .def(py::init<const char *, const std::shared_ptr<GraphView>&, const std::vector<InputCategory>&>(),
           py::arg("type"),
           py::arg("graph"),
@@ -202,6 +202,8 @@ void init_MetaOperatorDefs(py::module &m) {
   .def("get_micro_graph", &MetaOperator_Op::getMicroGraph)
   .def("set_upper_node", &MetaOperator_Op::setUpperNode);
 
+  declare_registrable<MetaOperator_Op>(m, "MetaOperatorOp");
+
   m.def("meta_operator", &MetaOperator,
     py::arg("type"),
     py::arg("graph"),
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 04882b7f5b86c7c09ed8b8e5a15c4bfabd03bb55..7dc4a4bee1a009b3ca033ea29861768c1a6fc19d 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -25,12 +25,12 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
-  const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("Pad" + std::to_string(DIM) + "DOp");
   py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
-                const PadBorderType &,
+                PadBorderType,
                 double>(),
         py::arg("beginEndTuples"),
         py::arg("borderType") = PadBorderType::Constant,
@@ -42,7 +42,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                         const std::string& name,
-                                                        const PadBorderType &borderType = PadBorderType::Constant,
+                                                        PadBorderType borderType = PadBorderType::Constant,
                                                         double borderValue = 0.0) {
         AIDGE_ASSERT(beginEndTuples.size() == 2*DIM, "begin_end_tuples size [{}] does not match DIM [{}]", beginEndTuples.size(), 2*DIM);
         return Pad<DIM>(to_array<2*DIM>(beginEndTuples.begin()), name, borderType, borderValue);
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index db7fc7bfb60ff8360933e5f84ab54d4cec8df724..09e0e2fa2c1c46bafcd253267d5c33187de6bd69 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -25,6 +25,8 @@ void init_Sigmoid(py::module& m) {
     .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName)
     .def_readonly_static("Type", &Sigmoid_Op::Type);
 
+    declare_registrable<Sigmoid_Op>(m, "SigmoidOp");
+
     m.def("Sigmoid", &Sigmoid, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index bac071e02db82790917276c2121ff26a3c9bf514..6ddfa8cd532b94211d49825f1753c1c745a3e72e 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -31,6 +31,7 @@ void init_OperatorTensor(py::module&);
 void init_Add(py::module&);
 void init_And(py::module&);
 void init_ArgMax(py::module&);
+void init_Atan(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
 void init_BitShift(py::module&);
@@ -115,6 +116,7 @@ void init_Aidge(py::module& m) {
     init_Add(m);
     init_And(m);
     init_ArgMax(m);
+    init_Atan(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
     init_BitShift(m);
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index e2215e704e32367a7ca273b067398bc19fc3fc01..dd5c5c110154427a8af7afbf70b2c76b61e507a8 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -131,11 +131,11 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
                 name = attrName.substr(0, qualifierPos - attrName.begin());
                 qualifier = attrName.substr(qualifierPos - attrName.begin() + 1);
             }
-
             const bool mandatory = (qualifier == "!");
             if (mandatory) {
                 // Required attribute:
                 if (!spec.attrs.hasAttr(name)) {
+                    Log::debug("Could not find mandatory attribute {} value {}.", name);
                     // Missing attribute
                     match = false;
                     break;
@@ -143,6 +143,7 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
                 else if (requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name)
                     || spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName))
                 {
+                    Log::debug("Attribute ({}) value mismatch {} != {}.", name, requiredSpecs.attrs.getAttr<std::string>(attrName), spec.attrs.getAttr<std::string>(name));
                     // Attribute value mismatch
                     match = false;
                     break;
@@ -168,6 +169,10 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
         Log::debug("  {}:{} - {}", (match) ? "MATCH" : "MISMATCH", priority, spec);
     }
 
+    if (matchingSpecs.empty()) {
+        Log::debug("  No registered spec to match against, returning requiredSpecs.");
+        return requiredSpecs;
+    }
     // Return best match
     const auto bestMatch = std::max_element(matchingSpecs.begin(), matchingSpecs.end());
     if (*bestMatch >= 0) {
@@ -374,6 +379,6 @@ std::shared_ptr<Aidge::ProdConso> Aidge::OperatorImpl::getProdConso() const {
     return std::make_shared<ProdConso>(mOp);
 }
 
-std::set<Aidge::ImplSpec> Aidge::OperatorImpl::getAvailableImplSpecs() const {
-    return std::set<ImplSpec>();
+std::vector<Aidge::ImplSpec> Aidge::OperatorImpl::getAvailableImplSpecs() const {
+    return std::vector<ImplSpec>();
 }
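
With the registry now returning an ordered std::vector, a backend can list its specs in priority order. A hedged sketch of a minimal override; the spec contents and the single-argument IOSpec constructor are assumptions for illustration only:

    #include <vector>
    #include "aidge/backend/OperatorImpl.hpp"
    #include "aidge/data/Data.hpp"

    using namespace Aidge;

    class DummyImpl : public OperatorImpl {
    public:
        using OperatorImpl::OperatorImpl;

        // Order is preserved by std::vector, so earlier entries can express preference.
        std::vector<ImplSpec> getAvailableImplSpecs() const override {
            return {
                ImplSpec{ImplSpec::IOSpec{DataType::Float32}},
                ImplSpec{ImplSpec::IOSpec{DataType::Any}}
            };
        }
    };
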
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index abfc91c6cdf9fd4f6eb46100074b22083514d82e..6f60d2f15ce0e561c32d7bc5a7561c2f8d507588 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -35,7 +35,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
     AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
-    auto add_ = Add_Op(2);
+    auto add_ = Add_Op();
     add_.associateInput(0, std::make_shared<Tensor>(*this));
     add_.associateInput(1, std::make_shared<Tensor>(other));
     add_.setDataType(dataType());
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index c19eab12ae34418386b1481702f64e4a82e9f771..da6d833f3aa933cd5e707814c279142de5bc4a23 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -18,6 +18,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/future_std/any.hpp"
 
 Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs)
     : mAttrs(attrs),
@@ -31,23 +32,18 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttribute
       mIdOutParents(
               std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex))
 {
-    // ctor
-    if (op) {
-        mForward.push_back([this](){ this->mOperator->forward(); return true; });
-        mBackward.push_back([this](){ this->mOperator->backward(); return true; });
-    }
+    mForward.push_back([this](){ this->mOperator->forward(); return true; });
+    // mForward.push_back(std::bind(&Operator::forward, mOperator.get()));
+    mBackward.push_back([this](){ this->mOperator->backward(); return true; });
 }
 
-Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs)
-    : Node(op, std::make_shared<DynamicAttributes>(attrs)) {}
+// Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs)
+//     : Node(op, std::make_shared<DynamicAttributes>(attrs)) {}
 
 Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
-    : Node(op, DynamicAttributes())
+    : Node(op, std::make_shared<DynamicAttributes>(std::map<std::string, future_std::any>({std::make_pair("name", future_std::any(name))})))
 {
-    // ctor
-    if (!name.empty()) {
-        mAttrs->setAttr<std::string>("name", name);
-    }
+    //ctor
 }
 
 ///////////////////////////////////////////////////////
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 033c476c8a9e865fdf9d5670e295c3e4fb6101b3..f6fd0cd9fc647e29402d36f1f6838642e099ae6c 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -22,12 +22,10 @@
 
 const std::string Aidge::Add_Op::Type = "Add";
 
-Aidge::Add_Op::Add_Op(const IOIndex_t nbIn)
-    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
+Aidge::Add_Op::Add_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1)
 {
-    if (nbIn == 0) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-    }
+    // ctor
 }
 
 Aidge::Add_Op::Add_Op(const Add_Op& op)
@@ -89,6 +87,8 @@ std::set<std::string> Aidge::Add_Op::getAvailableBackends() const {
     return Registrar<Add_Op>::getKeys();
 }
 
-std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) {
-    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Add(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Add_Op>(), name);
 }
\ No newline at end of file
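
With Add_Op fixed at two data inputs, call sites simply drop the former input-count argument. A minimal sketch of the updated wiring, mirroring the pattern used in the unit tests further down; header paths are the usual Aidge ones:

    #include <memory>
    #include "aidge/graph/Node.hpp"
    #include "aidge/operator/Add.hpp"
    #include "aidge/operator/Producer.hpp"

    using namespace Aidge;

    std::shared_ptr<Node> addTwoInputs() {
        auto x0  = Producer({2}, "x0");
        auto x1  = Producer({2}, "x1");
        auto sum = Add("sum");      // always exactly two data inputs now
        x0->addChild(sum, 0, 0);    // output 0 of x0 -> input 0 of sum
        x1->addChild(sum, 0, 1);    // output 0 of x1 -> input 1 of sum
        return sum;
    }
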
diff --git a/src/operator/Atan.cpp b/src/operator/Atan.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c0a494ee66fb11bcccac21141da30df5546f0b3c
--- /dev/null
+++ b/src/operator/Atan.cpp
@@ -0,0 +1,53 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Atan.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Atan_Op::Type = "Atan";
+
+Aidge::Atan_Op::Atan_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Atan_Op::Atan_Op(const Aidge::Atan_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Atan_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Atan_Op::clone() const {
+    return std::make_shared<Atan_Op>(*this);
+}
+
+
+void Aidge::Atan_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Atan_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Atan_Op::getAvailableBackends() const {
+    return Registrar<Atan_Op>::getKeys();
+}
+
+///////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Atan(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Atan_Op>(), name);
+}
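
A hedged sketch of driving the new operator directly from C++, in the same style as the existing OperatorTensor helpers; the "cpu" backend name is an assumption (an Atan kernel must be registered, e.g. by a CPU backend plugin):

    #include <memory>
    #include "aidge/data/Tensor.hpp"
    #include "aidge/operator/Atan.hpp"

    using namespace Aidge;

    void atanForwardDemo(const std::shared_ptr<Tensor>& input) {
        Atan_Op op;                      // element-wise arc-tangent, one data input
        op.associateInput(0, input);
        op.setDataType(input->dataType());
        op.setBackend("cpu");            // assumes an Atan kernel is registered for "cpu"
        op.forwardDims();
        op.forward();                    // result available via op.getOutput(0)
    }
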
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index f8c8e5e3f32fff8306184dfdf3baa87392479ebf..78266e3fb391d6f33da9e65b2125dd57885ac89e 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -24,7 +24,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
+const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling" + std::to_string(DIM) + "D";
 
 
 template <Aidge::DimIdx_t DIM>
@@ -134,4 +134,4 @@ std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t
 }
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&);
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&);
-template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&);
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&);
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index bcf3b29c45abe2c40788fd1ec0bad87db8ee227b..b18be528795ccf470d7503ef1a915b6b66dc255c 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -24,7 +24,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm";
+const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
@@ -120,4 +120,4 @@ inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFe
 
 template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
 template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
-template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index e055c7e5ebb9a6cff9f774da444cc582ed7de34c..836c47645c20ff23539b836af8593cddfbb48498 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -24,7 +24,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
+const std::string Aidge::Conv_Op<DIM>::Type = "Conv" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index f4d524356bd207a7ed101c2887c2fcda53f3bb83..d2a1c9e3d08d8e2c0400d436c6123aeb5f7ce66b 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -25,7 +25,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
+const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 99ccb7505cd959178e4bd7132e32552ea5a72ecf..1e1db2f94948dfd1dd4c6219419b7989eeac8b3a 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -24,7 +24,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Fold_Op<DIM>::Type = "Fold";
+const std::string Aidge::Fold_Op<DIM>::Type = "Fold" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
@@ -102,4 +102,4 @@ std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM>
     return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
 }
 
-template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&);
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index 5ce137fe6b6c0e4b7150bfc0f1182f6f8ee94850..535b53749caeffca34eb0bf541f06dee30a3a333 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -20,7 +20,7 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling";
+const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index ab6bde74fb73011f7b49e6958d8cfa8320d0bc1b..d93d7d320f0f508b20714943ae3c8ed7fc561ec8 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -257,4 +257,4 @@ std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
     auto node = std::make_shared<Node>(op, name);
     op->setUpperNode(node);
     return node;
-}
\ No newline at end of file
+}
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index 9620f040472aed984afb99018cde5476ec5f60d3..2ed548805010a6cc87950c4d1f7b89edbea4f75c 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -35,14 +35,14 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     auto input = Identity((!name.empty()) ? name + "_input" : "");
     auto hiddenState = Memorize(seqLength, (!name.empty()) ? name + "_hidden_state" : "");
     auto cellState = Memorize(seqLength, (!name.empty()) ? name + "_cell_state" : "");
-    auto add = Add(2, (!name.empty()) ? name + "_add" : "");
+    auto add = Add((!name.empty()) ? name + "_add" : "");
 
     // Forget gate
     auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateX" : "");
     input->addChild(forgetGateX, 0, 0);
     auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateH" : "");
     hiddenState->addChild(forgetGateH, 1, 0);
-    auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
+    auto forgetGate = Add((!name.empty()) ? name + "_forgetGate" : "");
     forgetGateX->addChild(forgetGate, 0, 0);
     forgetGateH->addChild(forgetGate, 0, 1);
     auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : "");
@@ -57,7 +57,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     input->addChild(inputGateX, 0, 0);
     auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateH" : "");
     hiddenState->addChild(inputGateH, 1, 0);
-    auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
+    auto inputGate = Add((!name.empty()) ? name + "_inputGate" : "");
     inputGateX->addChild(inputGate, 0, 0);
     inputGateH->addChild(inputGate, 0, 1);
     auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : "");
@@ -71,7 +71,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     input->addChild(cellCandidateX, 0, 0);
     auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateH" : "");
     hiddenState->addChild(cellCandidateH, 1, 0);
-    auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
+    auto cellCandidate = Add((!name.empty()) ? name + "_cellCandidate" : "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
     cellCandidateH->addChild(cellCandidate, 0, 1);
     auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : "");
@@ -83,7 +83,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     input->addChild(outputGateX, 0, 0);
     auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateH" : "");
     hiddenState->addChild(outputGateH, 1, 0);
-    auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
+    auto outputGate = Add((!name.empty()) ? name + "_outputGate" : "");
     outputGateX->addChild(outputGate, 0, 0);
     outputGateH->addChild(outputGate, 0, 1);
     auto outputGateAct = Sigmoid((!name.empty()) ? name + "_outputGateAct" : "");
@@ -143,14 +143,14 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
     auto input = Identity("");
     auto hiddenState = Memorize(seqLength, "");
     auto cellState = Memorize(seqLength, "");
-    auto add = Add(2, "");
+    auto add = Add("");
 
     // Forget gate
     auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(forgetGateX, 0, 0);
     auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(forgetGateH, 1, 0);
-    auto forgetGate = Add(2, "");
+    auto forgetGate = Add("");
     forgetGateX->addChild(forgetGate, 0, 0);
     forgetGateH->addChild(forgetGate, 0, 1);
     auto forgetGateAct = Sigmoid("");
@@ -165,7 +165,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
     input->addChild(inputGateX, 0, 0);
     auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(inputGateH, 1, 0);
-    auto inputGate = Add(2, "");
+    auto inputGate = Add("");
     inputGateX->addChild(inputGate, 0, 0);
     inputGateH->addChild(inputGate, 0, 1);
     auto inputGateAct = Sigmoid("");
@@ -179,7 +179,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
     input->addChild(cellCandidateX, 0, 0);
     auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(cellCandidateH, 1, 0);
-    auto cellCandidate = Add(2, "");
+    auto cellCandidate = Add("");
     cellCandidateX->addChild(cellCandidate, 0, 0);
     cellCandidateH->addChild(cellCandidate, 0, 1);
     auto cellCandidateAct = Tanh("");
@@ -191,7 +191,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
     input->addChild(outputGateX, 0, 0);
     auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(outputGateH, 1, 0);
-    auto outputGate = Add(2,"");
+    auto outputGate = Add("");
     outputGateX->addChild(outputGate, 0, 0);
     outputGateH->addChild(outputGate, 0, 1);
     auto outputGateAct = Sigmoid("");
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
index c35d964d0cdd224e9d00eadf6e158bc87b4c776f..bcda67d0ce4c43e4936739affb9d681942062cb1 100644
--- a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -41,7 +41,7 @@ std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_
         AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
     });
 
-    return MetaOperator("PaddedAvgPooling", graph, {}, name);
+    return MetaOperator(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
 }
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
@@ -75,7 +75,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<Dim
         AvgPooling(kernel_dims, "", stride_dims)
     });
 
-    return std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph);
+    return std::make_shared<MetaOperator_Op>(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph);
 }
 
 template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp
index 49373341a3a7cd1dd764dbfcb385a1817079e8b0..ca769026d900868d372bd7207ad616e07041e857 100644
--- a/src/operator/MetaOperatorDefs/PaddedConv.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp
@@ -43,7 +43,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "")
     });
-    auto metaOpNode = MetaOperator("PaddedConv", graph, {}, name);
+    auto metaOpNode = MetaOperator(("PaddedConv" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
     addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
     if (!no_bias) {
         addProducer(metaOpNode, 2, {out_channels}, "b");
@@ -63,7 +63,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op(
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
     auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
-    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
+    return std::make_shared<MetaOperator_Op>(("PaddedConv" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv}));
 }
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
index 12d980b4073c115443fe0ed8db38f978aa98dcad..b68794fb9b1ce76ddc5bca8e7d697a7b98d9f141 100644
--- a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
@@ -40,7 +40,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t n
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv_depth_wise" : "")
     });
-    auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, {}, name);
+    auto metaOpNode = MetaOperator(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), graph, {},name);
     addProducer(metaOpNode, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
     if (!no_bias) {
         addProducer(metaOpNode, 2, {nb_channels}, "b");
@@ -61,7 +61,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op(
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
     auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
-    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
+    return std::make_shared<MetaOperator_Op>(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv}));
 }
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index 39f61e328bd3f98bc836604462bbfc064fbb93be..ba762da5737e986941e0c72196503415f7af29b7 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -20,7 +20,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Pad_Op<DIM>::Type = "Pad";
+const std::string Aidge::Pad_Op<DIM>::Type = "Pad" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const {
@@ -61,16 +61,18 @@ std::set<std::string> Aidge::Pad_Op<DIM>::getAvailableBackends() const {
 template class Aidge::Pad_Op<1>;
 template class Aidge::Pad_Op<2>;
 
+////////////////////////////////////////////////////////////////////////////////
+
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Pad(const std::array<Aidge::DimSize_t, 2*DIM> &beginEndTuples,
                                            const std::string& name,
-                                           const PadBorderType &borderType,
+                                           PadBorderType borderType,
                                            double borderValue)
 {
     AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", Pad_Op<DIM>::Type);
     return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
 }
 
-template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
-template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
-template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, PadBorderType, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, PadBorderType, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, PadBorderType, double borderValue);
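
Since PadBorderType is now passed by value, the factory reads as below; a minimal sketch of building a 2D constant-padding node:

    #include <memory>
    #include "aidge/operator/Pad.hpp"

    using namespace Aidge;

    std::shared_ptr<Node> makePad() {
        // One element of constant padding on each of the four 2D borders.
        return Pad<2>({1, 1, 1, 1}, "pad0", PadBorderType::Constant, 0.0);
    }
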
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index a8b20d21ae1f6c7bfba1a9e52d039f292b6aa62e..b51b4f346c92f778fe0a044df187cd8d0d0f7304 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -102,7 +102,7 @@ bool Squeeze_Op::forwardDims(bool allowDataDependency) {
                        axis < static_cast<int8_t>(input_dims.size()),
                    "{} : Axis index OutOfBounds error, expected value "
                    "within size limits of input tensor : "
-                   "[-{},{}), got {}.",
+                   "[-{},{}], got {}.",
                    type(), input_dims.size(), input_dims.size() - 1, axis);
       auto temp =
           static_cast<DimIdx_t>(axis >= 0 ? axis : axis + input_dims.size());
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index 43afd160e03395c65c4dcbe5504cb865da4ed8d8..f3353b45cd6a732fa456ea0585ec5d040d53ef31 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -80,7 +80,7 @@ bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
                      axis < static_cast<int8_t>(output_nb_dims),
                  "{} : Axis index OutOfBounds enrror, expected value "
                  "within size limits of input tensor : "
-                 "[-{},{}), got {}.",
+                 "[-{},{}], got {}.",
                  type(), output_nb_dims, output_nb_dims - 1, axis);
     axes_rectified_idx.push_back(
         static_cast<DimIdx_t>(axis >= 0 ? axis : axis + output_nb_dims));
diff --git a/src/recipes/ConvToMatMul.cpp b/src/recipes/ConvToMatMul.cpp
index 9b88ffc73204b44cf857213d1fdfff49b3191f73..70be33932295aab49653bdc2853f4411ded919b4 100644
--- a/src/recipes/ConvToMatMul.cpp
+++ b/src/recipes/ConvToMatMul.cpp
@@ -24,7 +24,7 @@
 #include "aidge/recipes/Recipes.hpp"
 
 size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
-    const auto matches = SinglePassGraphMatching(graphView).match("Conv");
+    const auto matches = SinglePassGraphMatching(graphView).match("Conv2D");
 
     size_t nbReplaced = 0;
     for (const auto& match : matches) {
@@ -75,7 +75,7 @@ size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
 
         // Handle bias
         if (convOp->getInput(2) && !convOp->getInput(2)->empty()) {
-            auto add = Add(2, (!convNode->name().empty()) ? convNode->name() + "_add" : "");
+            auto add = Add((!convNode->name().empty()) ? convNode->name() + "_add" : "");
             auto bReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{1, static_cast<int64_t>(convOp->getInput(2)->size()), 1, 1}}),
                 (!convNode->name().empty()) ? convNode->name() + "_b_reshape_shape_prod" : "",
                 true);
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 34722c19f8c0fddaffa7357136f1512a027e1617..4c4de25282c487d023f9c184b015ac332e716b7b 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -190,9 +190,10 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
 }
 
 void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
-    auto matches = SinglePassGraphMatching(graphView).match("(Conv|ConvDepthWise|PaddedConv|PaddedConvDepthWise)->BatchNorm");
+    auto matches = SinglePassGraphMatching(graphView).match("(Conv2D|ConvDepthWise2D|PaddedConv2D|PaddedConvDepthWise2D)->BatchNorm2D");
 
     for (auto match : matches) {
+        fmt::println("Match !");
         auto rootNode = match.graph->rootNode();
         fuseBatchNorm(rootNode, *rootNode->getChildren().begin());
     }
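
Because operator Type strings now carry their dimension (Conv2D, Pad2D, BatchNorm2D, ...), downstream graph queries must use the new names, as the updated recipes above do. A hedged sketch of such a query; the Matching.hpp path is an assumption:

    #include <cstddef>
    #include <memory>
    #include "aidge/graph/GraphView.hpp"
    #include "aidge/graph/Matching.hpp"  // assumed location of SinglePassGraphMatching

    using namespace Aidge;

    std::size_t countConvRelu(const std::shared_ptr<GraphView>& graphView) {
        // "Conv" alone no longer matches: the registered type is "Conv2D".
        const auto matches = SinglePassGraphMatching(graphView).match("Conv2D->ReLU");
        return matches.size();
    }
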
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index a08808ee5e6c2657a76213dcff80cec53b23e7ee..2fa06cf23b3b681211208a3e5bbea9226f0930b8 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -447,10 +447,10 @@ TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNode
     auto data1 = Producer({2}, "data1");
     auto data2 = Producer({2}, "data2");
     auto data3 = Producer({2}, "data3");
-    auto add1 = Add(2, "add1");
-    auto add2 = Add(2, "add2");
+    auto add1 = Add("add1");
+    auto add2 = Add("add2");
     auto split1 = Split(2, 0, {1, 1}, "split1");
-    auto add3 = Add(3, "add3");
+    auto add3 = Add("add3");
     auto g = std::make_shared<GraphView>("TestGraph");
     data1->addChild(add1);
     data2->addChild(add1);
@@ -508,9 +508,9 @@ TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNode
 TEST_CASE("[core/graph] GraphView(getOrderedNodes) cyclic", "[GraphView][getOrderedNodes]") {
     auto data1 = Producer({2}, "data1");
     auto data2 = Producer({2}, "data2");
-    auto add1 = Add(2, "add1");
+    auto add1 = Add("add1");
     auto mem1 = Memorize(1, "mem1");
-    auto add2 = Add(2, "add2");
+    auto add2 = Add("add2");
     auto g = std::make_shared<GraphView>("TestGraph");
     data1->addChild(add1);
     data2->addChild(add1);
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index 2fdcd611d378ceb6c3dbdc853920eecf92c31141..d6d98d4701cba900548d127879c9b3940cf1d739 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -51,10 +51,10 @@ TEST_CASE("[core/graph] Matching") {
         PaddedConv(8, 16, {3, 3}, "conv3", {1, 1}, {2, 2, 2, 2}),
         ReLU("relu3"),
         PaddedConv(8, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}),
-        Add(2, "add"),
+        Add("add"),
         PaddedConv(8, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}),
         ReLU("relu5"),
-        Add(2, "add2")
+        Add("add2")
     });
 
     g1->getNode("relu3")->addChild(g1->getNode("add"), 0, 1);
@@ -65,8 +65,8 @@ TEST_CASE("[core/graph] Matching") {
     expandMetaOps(g1);
     g1->save("Test_examples", true);
 
-    SECTION("Conv->(ReLU->Pad->Conv)*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU->Pad->Conv)*");
+    SECTION("Conv2D->(ReLU->Pad2D->Conv2D)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D->(ReLU->Pad2D->Conv2D)*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "relu1", "relu2"}},
@@ -77,24 +77,24 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv->ReLU;ReLU->Pad") {
-        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU;ReLU->Pad"));
+    SECTION("Conv2D->ReLU;ReLU->Pad2D") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv2D->ReLU;ReLU->Pad2D"));
     }
 
-    SECTION("Conv->ReLU#1;ReLU#2->Pad") {
-        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU#1;ReLU#2->Pad"));
+    SECTION("Conv2D->ReLU#1;ReLU#2->Pad2D") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv2D->ReLU#1;ReLU#2->Pad2D"));
     }
 
-    SECTION("Conv?->ReLU") {
-        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv?->ReLU"));
+    SECTION("Conv2D?->ReLU") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv2D?->ReLU"));
     }
 
     SECTION("(Add#<*~.)*") {
         REQUIRE_THROWS(SinglePassGraphMatching(g1).match("(Add#<*~.)*"));
     }
 
-    SECTION("Conv->(ReLU~>Pad->Conv)*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*");
+    SECTION("Conv2D->(ReLU~>Pad2D->Conv2D)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D->(ReLU~>Pad2D->Conv2D)*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
@@ -105,8 +105,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv->(ReLU~>Pad->Conv)* [disjoint]") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*", true);
+    SECTION("Conv2D->(ReLU~>Pad2D->Conv2D)* [disjoint]") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D->(ReLU~>Pad2D->Conv2D)*", true);
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
@@ -114,8 +114,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv~>(ReLU~>Pad->Conv)*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU~>Pad->Conv)*");
+    SECTION("Conv~>(ReLU~>Pad2D->Conv2D)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D~>(ReLU~>Pad2D->Conv2D)*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
@@ -126,8 +126,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer");
+    SECTION("Pad2D->Conv2D#->ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#->ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -135,8 +135,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer");
+    SECTION("Pad2D->Conv2D#~>ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#~>ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -145,8 +145,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}");
+    SECTION("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-Producer){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-Producer){2}");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -155,8 +155,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#->ReLU;(Conv#<*-Producer){2}") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-Producer){2}");
+    SECTION("Pad2D->Conv2D#->ReLU;(Conv2D#<*-Producer){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#->ReLU;(Conv2D#<*-Producer){2}");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -164,8 +164,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#~>ReLU;(Conv#<*-.){2}") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-.){2}");
+    SECTION("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-.){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-.){2}");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -174,8 +174,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#->ReLU;(Conv#<*-.){2}") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-.){2}");
+    SECTION("Pad2D->Conv2D#->ReLU;(Conv2D#<*-.){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#->ReLU;(Conv2D#<*-.){2}");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -184,7 +184,7 @@ TEST_CASE("[core/graph] Matching") {
     }
 
     SECTION("Conv#~>ReLU*;Conv#<-Pad*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU*;Conv#<-Pad*");
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU*;Conv2D#<-Pad2D*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -195,8 +195,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#->ReLU*;Conv#<-Pad*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU*;Conv#<-Pad*");
+    SECTION("Conv2D#->ReLU*;Conv2D#<-Pad2D*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#->ReLU*;Conv2D#<-Pad2D*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -207,8 +207,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?");
+    SECTION("Conv2D#->ReLU?-*>Add#1?->ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#->ReLU?-*>Add#1?->ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -219,8 +219,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?");
+    SECTION("Conv2D#~>ReLU?-*>Add#1?~>ReLU?;Conv2D#<-Pad?;(Add#1<*-.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU?-*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -231,8 +231,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?");
+    SECTION("Conv2D#~>ReLU?~*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*~.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU?~*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*~.)?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -243,8 +243,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#->ReLU?;Conv#<-Pad?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?;Conv#<-Pad?");
+    SECTION("Conv2D#->ReLU?;Conv2D#<-Pad2D?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#->ReLU?;Conv2D#<-Pad2D?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -255,8 +255,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#~>ReLU?;Conv#<-Pad?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?;Conv#<-Pad?");
+    SECTION("Conv2D#~>ReLU?;Conv2D#<-Pad2D?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU?;Conv2D#<-Pad2D?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -267,8 +267,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("(Conv|ReLU)->Add") {
-        const auto results = SinglePassGraphMatching(g1).match("(Conv|ReLU)->Add");
+    SECTION("(Conv2D|ReLU)->Add") {
+        const auto results = SinglePassGraphMatching(g1).match("(Conv2D|ReLU)->Add");
 
         checkMatches(results, {
             {"conv4_conv", {"add", "conv4_conv"}},
@@ -294,33 +294,33 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv~*>(ReLU&Add)") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv~*>(ReLU&Add)");
+    SECTION("Conv2D~*>(ReLU&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D~*>(ReLU&Add)");
 
         checkMatches(results, {
             {"conv5_conv", {"add2", "conv5_conv", "relu5"}}
         });
     }
 
-    SECTION("Conv~>(ReLU&Add)") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU&Add)");
+    SECTION("Conv2D~>(ReLU&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D~>(ReLU&Add)");
         REQUIRE(results.size() == 0);
     }
 
-    SECTION("ReLU~*>((Pad->Conv-*>Add#)&Add#)") {
-        const auto results = SinglePassGraphMatching(g1).match("ReLU~*>((Pad->Conv-*>Add#)&Add#)");
+    SECTION("ReLU~*>((Pad2D->Conv2D-*>Add#)&Add#)") {
+        const auto results = SinglePassGraphMatching(g1).match("ReLU~*>((Pad2D->Conv2D-*>Add#)&Add#)");
 
         checkMatches(results, {
             {"relu3", {"add", "conv4_conv", "conv4_pad", "relu3"}}
         });
     }
 
-    SECTION("ReLU-*>((Pad->Conv-*>Add)&Add)") {
-        const auto results = SinglePassGraphMatching(g1).match("ReLU-*>((Pad->Conv-*>Add)&Add)");
+    SECTION("ReLU-*>((Pad2D->Conv2D-*>Add)&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("ReLU-*>((Pad2D->Conv2D-*>Add)&Add)");
         REQUIRE(results.size() == 0);
     }
 
-    SECTION("Pad->Conv[3x3]->ReLU") {
+    SECTION("Pad2D->Conv2D[3x3]->ReLU") {
         auto gm = SinglePassGraphMatching(g1);
         gm.addNodeLambda("3x3", [](const NodePtr& node) {
             const std::shared_ptr<Conv_Op<2>> op =
@@ -328,20 +328,20 @@ TEST_CASE("[core/graph] Matching") {
             return (op->kernelDims() == std::array<DimSize_t, 2>({3, 3}));
         });
 
-        const auto results = gm.match("Pad->Conv[3x3]->ReLU");
+        const auto results = gm.match("Pad2D->Conv2D[3x3]->ReLU");
 
         checkMatches(results, {
             {"conv3_pad", {"conv3_conv", "conv3_pad", "relu3"}}
         });
     }
 
-    SECTION(".[test]->Pad") {
+    SECTION(".[test]->Pad2D") {
         auto gm = SinglePassGraphMatching(g1);
         gm.addNodeLambda("test", [](const NodePtr& node) {
             return (node->type() == "Add" || (node->type() == "ReLU" && node->name() == "relu1"));
         });
 
-        const auto results = gm.match(".[test]->Pad");
+        const auto results = gm.match(".[test]->Pad2D");
 
         checkMatches(results, {
             {"add", {"add", "conv5_pad"}},
@@ -364,16 +364,16 @@ TEST_CASE("[core/graph] Matching") {
         Conv(1, 4, {5, 5}, "conv4")
     });
 
-    SECTION("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))") {
+    SECTION("((Conv2D#->(.[exBN]|$))|(FC#->(.[exFC])*->$))") {
         auto gm = SinglePassGraphMatching(g2);
         gm.addNodeLambda("exBN", [](const NodePtr& node) {
-            return (node->type() != "BatchNorm");
+            return (node->type() != "BatchNorm2D");
         });
         gm.addNodeLambda("exFC", [](const NodePtr& node) {
             return (node->type() != "FC");
         });
 
-        const auto results = gm.match("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))");
+        const auto results = gm.match("((Conv2D#->(.[exBN]|$))|(FC#->(.[exFC])*->$))");
 
         checkMatches(results, {
             {"conv2", {"conv2", "relu2"}},
@@ -396,13 +396,13 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#->(.[exConv])*->$") {
+    SECTION("Conv2D#->(.[exConv])*->$") {
         auto gm = SinglePassGraphMatching(g2);
         gm.addNodeLambda("exConv", [](const NodePtr& node) {
-            return (node->type() != "Conv");
+            return (node->type() != "Conv2D");
         });
 
-        const auto results = gm.match("Conv#->(.[exConv])*->$");
+        const auto results = gm.match("Conv2D#->(.[exConv])*->$");
 
         checkMatches(results, {
             {"conv4", {"conv4"}}
@@ -423,13 +423,13 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#") {
+    SECTION("(((FC#|Conv2D#)<-(.[exParam])*<-$)|((FC#|Conv2D#)->(.[exParam])*->$));(FC#|Conv2D#)<1-Producer#") {
         auto gm = SinglePassGraphMatching(g2);
         gm.addNodeLambda("exParam", [](const NodePtr& node) {
-            return (node->type() != "FC" && node->type() != "Conv");
+            return (node->type() != "FC" && node->type() != "Conv2D");
         });
 
-        const auto results = gm.match("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#");
+        const auto results = gm.match("(((FC#|Conv2D#)<-(.[exParam])*<-$)|((FC#|Conv2D#)->(.[exParam])*->$));(FC#|Conv2D#)<1-Producer#");
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv1_w", "dataProvider"}},
@@ -437,13 +437,13 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv->ReLU [perf]") {
+    SECTION("Conv2D->ReLU [perf]") {
         const size_t nbTests = 3;
         std::mt19937::result_type seed(1);
 
         for (int test = 0; test < nbTests; ++test) {
             RandomGraph randGraph;
-            randGraph.types = {"Conv", "ReLU", "Dummy"};
+            randGraph.types = {"Conv2D", "ReLU", "Dummy"};
             randGraph.typesWeights = {0.4, 0.4, 0.2};
             randGraph.avgIn = 1;
             randGraph.maxIn = 1;
@@ -460,7 +460,7 @@ TEST_CASE("[core/graph] Matching") {
             auto gm = SinglePassGraphMatching(g1);
 
             const auto start = std::chrono::system_clock::now();
-            const auto results = gm.match("Conv->ReLU#;ReLU#->Dummy");
+            const auto results = gm.match("Conv2D->ReLU#;ReLU#->Dummy");
             const auto end = std::chrono::system_clock::now();
             const auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
 
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index 68ac509e79e347106a9a132249f125ebe6e39f6a..79e471d44a49dfb52fd5eb4aa1ed2dc4ab8dc0bb 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -153,9 +153,9 @@ TEST_CASE("GraphRegexUser") {
 
       // generate the original GraphView
         auto matmul0 = MatMul("matmul0");
-        auto add0 = Add(2, "add0");
+        auto add0 = Add("add0");
         auto matmul1 = MatMul("matmul1");
-        auto add1 = Add(2, "add1");
+        auto add1 = Add("add1");
 
         auto b0 = Producer({5}, "B0");
         auto w0 = Producer({5, 5}, "W0");
diff --git a/unit_tests/graphRegex/Test_examples.cpp b/unit_tests/graphRegex/Test_examples.cpp
index d85ae5c893a7ae4497125a62dad3cde97dac5195..0ccc05b5a957673167a16643b22bf047fc80f43f 100644
--- a/unit_tests/graphRegex/Test_examples.cpp
+++ b/unit_tests/graphRegex/Test_examples.cpp
@@ -40,7 +40,7 @@ TEST_CASE("Examples", "[GraphMatching]") {
 
     auto regex = std::make_shared<GraphRegex>();
     regex->setKeyFromGraph(g1);
-    regex->addQuery("Pad->Conv->ReLU");
+    regex->addQuery("Pad2D->Conv2D->ReLU");
     // Won't work, wrong number of matches:
     //regex->addQuery("Pad*->Conv->ReLU*");
 
@@ -52,4 +52,4 @@ TEST_CASE("Examples", "[GraphMatching]") {
     }
 }
 
-}  // namespace Aidge
\ No newline at end of file
+}  // namespace Aidge
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index d1b4e2e31e8c57e2c3eebd42019ba9f42c4d39e0..6711e1524fe0595b4effd68397f8cb684df590a9 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -34,10 +34,10 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
 
         REQUIRE(microGraph->getNodes().size() == 2);
         REQUIRE(microGraph->inputNodes().size() == 2);  // 2 because Conv has inputs outside the meta-op (Producers for weight and bias)
-        REQUIRE(nodePtrTo(microGraph->getOrderedInputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Pad", 0}, {"Conv", 1}, {"Conv", 2}}));
-        REQUIRE(nodePtrTo(microGraph->getOrderedOutputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Conv", 0}}));
+        REQUIRE(nodePtrTo(microGraph->getOrderedInputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Pad2D", 0}, {"Conv2D", 1}, {"Conv2D", 2}}));
+        REQUIRE(nodePtrTo(microGraph->getOrderedOutputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Conv2D", 0}}));
         REQUIRE(microGraph->outputNodes().size() == 1);
-        REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
+        REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv2D");
         REQUIRE(op->nbInputs() == 3);
         REQUIRE(op->inputCategory(0) == InputCategory::Data);
         REQUIRE(op->inputCategory(1) == InputCategory::Param);
diff --git a/unit_tests/operator/Test_Operator.cpp b/unit_tests/operator/Test_Operator.cpp
index a050bbc4021b0c70a0d8faf6478eb2bd13ebdb58..6bd12c51ef367ad1cf1859afc56af8a21a706237 100644
--- a/unit_tests/operator/Test_Operator.cpp
+++ b/unit_tests/operator/Test_Operator.cpp
@@ -26,7 +26,7 @@ namespace Aidge {
 // TEST_CASE("[core/operator] Operator(computeReceptiveField)", "[Operator][computeReceptiveFiled]") {
 //     auto dataProvider1 = Producer({16, 3, 224, 224}, "dataProvider1");
 //     auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider2");
-//     auto gen1 = Add(2);
+//     auto gen1 = Add();
 //     auto gen2 = ReLU();
 
 //     auto g = std::make_shared<GraphView>("TestGraph");
diff --git a/unit_tests/recipes/Test_FuseToMetaOps.cpp b/unit_tests/recipes/Test_FuseToMetaOps.cpp
index 9fceedf2feef0a3ed79b83a8494a1a2b49f77291..7bb3ae63add6568f7de08a996b27a495a644cf46 100644
--- a/unit_tests/recipes/Test_FuseToMetaOps.cpp
+++ b/unit_tests/recipes/Test_FuseToMetaOps.cpp
@@ -35,7 +35,7 @@ TEST_CASE("[cpu/recipes] FuseToMetaOps", "[FuseToMetaOps][recipes]") {
     g1->save("FuseToMetaOps_before");
 
     // FIXME: GraphRegex also matches the Conv Producers, which are not in the query!
-    const auto nbFused = fuseToMetaOps(g1, "Conv->ReLU", "ConvReLU");
+    const auto nbFused = fuseToMetaOps(g1, "Conv2D->ReLU", "ConvReLU");
     g1->save("FuseToMetaOps_after", true);
 
     REQUIRE(nbFused == 2);
diff --git a/unit_tests/recipes/Test_LabelGraph.cpp b/unit_tests/recipes/Test_LabelGraph.cpp
index 78f67d823a17454c1ecff40a2307556c990c4f53..82dae3c48c137b12b8c0816fadcf40a1251137d7 100644
--- a/unit_tests/recipes/Test_LabelGraph.cpp
+++ b/unit_tests/recipes/Test_LabelGraph.cpp
@@ -44,11 +44,11 @@ TEST_CASE("[LabelGraph] conv") {
 
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
-        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling2D");
     }
 }
 
@@ -73,11 +73,11 @@ TEST_CASE("[LabelGraph] deleted node") {
 
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
-        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling2D");
     }
 
     SECTION("Check dimensions") {
@@ -111,11 +111,11 @@ TEST_CASE("[LabelGraph] deleted nodes") {
 
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
-        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling2D");
     }
 }
 
@@ -139,11 +139,11 @@ TEST_CASE("[LabelGraph] pooling") {
 
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
-        REQUIRE(g2->getNode("pool1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("pool1")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("pool1")->getOperator()->getRawOutput(0) == g2->getNode("pool2")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("pool2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("pool2")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("pool2")->getOperator()->getRawOutput(0) == g2->getNode("pool3")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("pool3")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("pool3")->getOperator()->type() == "MaxPooling2D");
     }
 
     SECTION("Check dimensions") {
diff --git a/unit_tests/recipes/Test_MatMulToFC.cpp b/unit_tests/recipes/Test_MatMulToFC.cpp
index 2adf882ca69e0d5ca5f050d1b89cfb09d81b536b..28eae0be17297467a29eab4e868e074c336d4a12 100644
--- a/unit_tests/recipes/Test_MatMulToFC.cpp
+++ b/unit_tests/recipes/Test_MatMulToFC.cpp
@@ -27,9 +27,9 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") {
     SECTION("with Add") {
         // generate the original GraphView
         auto matmul0 = MatMul("matmul0");
-        auto add0 = Add(2, "add0");
+        auto add0 = Add("add0");
         auto matmul1 = MatMul("matmul1");
-        auto add1 = Add(2, "add1");
+        auto add1 = Add("add1");
 
         auto b0 = Producer({5}, "B0");
         auto w0 = Producer({5, 5}, "W0");
@@ -76,7 +76,7 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") {
         // generate the original GraphView
         auto matmul0 = MatMul("matmul0");
         auto matmul1 = MatMul("matmul1");
-        auto add1 = Add(2, "add1");
+        auto add1 = Add("add1");
 
         auto w0 = Producer({5, 5}, "W0");
         auto b1 = Producer({5}, "B1");