diff --git a/.clang-tidy b/.clang-tidy
new file mode 100644
index 0000000000000000000000000000000000000000..77e8c9dbd6c7c95d38505acdfc45740403031597
--- /dev/null
+++ b/.clang-tidy
@@ -0,0 +1,188 @@
+---
+Checks: "google-*, readability-identifier-naming"
+WarningsAsErrors: ''
+HeaderFilterRegex: ''
+FormatStyle: none
+CheckOptions:
+  # Case style
+    # Following https://webkit.org/code-style-guidelines/ :
+    # CamelCase for class,struct, namespace
+    # camelBack for variables, functions
+  # CamelCase for class,struct, namespace
+  - key: 'readability-identifier-naming.AbstractClassCase'
+    value: 'CamelCase'
+  # CamelCase for class,struct, namespace
+  - key: 'readability-identifier-naming.ClassCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ClassConstantCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.ClassConstantPrefix'
+    value: 's_'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ClassMemberCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.ClassMemberPrefix'
+    value: 's_'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ClassMethodCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantMemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantPointerParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ClassMemberCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.ClassMemberPrefix'
+    value: 's_'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ClassMethodCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantMemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstantPointerParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstexprFunctionCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstexprMethodCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ConstexprVariableCase'
+    value: 'camelBack'
+  # CamelCase for class,struct, namespace
+  - key: 'readability-identifier-naming.EnumCase'
+    value: 'CamelCase'
+  # CamelCase for class,struct, namespace
+  - key: 'readability-identifier-naming.EnumConstantCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.FunctionCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalConstantCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalConstantPointerCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalFunctionCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalPointerCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.GlobalVariableCase'
+    value: 'camelBack'
+  # CamelCase for class,struct, namespace
+  - key: 'readability-identifier-naming.InlineNamespaceCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.LocalConstantCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.LocalConstantPointerCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.LocalPointerCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.LocalVariableCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.MacroDefinitionCase'
+    value: 'UPPER_CASE'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.MemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.MethodCase'
+    value: 'camelBack'
+  # CamelCase for class,struct, namespace
+  - key: 'readability-identifier-naming.NamespaceCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ParameterPackCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PointerParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PrivateMemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PrivateMemberPrefix'
+    value: 'm_'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PrivateMethodCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ProtectedMemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ProtectedMemberPrefix'
+    value: 'm_'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ProtectedMethodCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PublicMemberCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.PublicMethodCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.ScopedEnumConstantCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.StaticConstantCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.StaticVariableCase'
+    value: 'camelBack'
+  - key: 'readability-identifier-naming.StructCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TemplateParameterCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TemplateTemplateParameterCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TypeTemplateParameterIgnoredRegexp'
+    value: 'expr-type'
+  # CamelCase for Type aliases
+  - key: 'readability-identifier-naming.TypeAliasCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TypedefCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.TypeTemplateParameterCase'
+    value: 'CamelCase'
+  - key: 'readability-identifier-naming.UnionCase'
+    value: 'CamelCase'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.ValueTemplateParameterCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.VariableCase'
+    value: 'camelBack'
+  # camelBack for variables, functions
+  - key: 'readability-identifier-naming.VirtualMethodCase'
+    value: 'camelBack'
+...
diff --git a/.codespellrc b/.codespellrc
new file mode 100644
index 0000000000000000000000000000000000000000..156ac804789e34e3d78a272ad5a27973cb9adce8
--- /dev/null
+++ b/.codespellrc
@@ -0,0 +1,15 @@
+[codespell]
+builtin = clear,rare,en-GB_to_en-US,names,informal,code
+check-filenames =
+check-hidden =
+skip = */.git,*/build*,*/prefix,*/aidge_core,*/aidge_core.egg-info,*/cmake,*/future-std.clang-format
+quiet-level = 2
+# childs : used a lot and understandable
+# dOut,inH,ro : used for testing
+# deque : cpp data struct
+# inout : commented code variable
+# nd : commented code
+# neighbours : exception to the gb to us english rule
+# neighbouring : exception to the gb to us english rule
+# endcode : documentation keyword
+ignore-words-list = childs,dOut,inH,ro,deque,inout,stdio,nd,neighbours,neighbouring,endcode
diff --git a/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1 b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
index c2715ea5550432838d3cc8692e97204b278d2c85..eb5658df3b96e3bc234e53d815bde4cecb4ed937 100644
--- a/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
+++ b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
@@ -4,7 +4,7 @@ $ErrorActionPreference = "Stop"
 $AIDGE_DEPENDENCIES = $env:AIDGE_DEPENDENCIES -split ' '
 Write-Host "Aidge dependencies : $AIDGE_DEPENDENCIES"
 if ( $($AIDGE_DEPENDENCIES.Length) -eq 0) {
-        Write-Host "- No dependencies provided for current repsitory"
+        Write-Host "- No dependencies provided for current repository"
         New-Item -ItemType Directory -Force -Path ".\build" | Out-Null
         Remove-Item -Path ".\build\*" -Recurse -Force
     } else {
diff --git a/CHANGELOG b/CHANGELOG
index 0031beb91337e681884cd5a1d8c420a099a27861..b53d52720c420583973bee58f8ba2b290f0879af 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,6 +1,8 @@
+# Version 0.4.0 (December 2024)
+
 # Version 0.2.1 (May 14, 2024)
 
-* rework export mechanism 
+* rework export mechanism
 * change `Operator::computeOutputDims()` with `Operator::forwardDims()`
 * automatic docstring decorators for python
 * add implementation of Operators only performing data/format manipulation
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 499c2971cb60f979e72419cf65b9897d0613bf0a..beec9fbb427afadccef156139bd277d743e6999f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -5,7 +5,7 @@ file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
 
 project(aidge_core
         VERSION ${version}
-        DESCRIPTION "Core algorithms for operators and graph of the AIDGE framework" 
+        DESCRIPTION "Core algorithms for operators and graph of the AIDGE framework"
         LANGUAGES CXX)
 message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
 message(STATUS "Project version: ${version}")
@@ -14,6 +14,9 @@ add_definitions(-DPROJECT_VERSION="${version}")
 message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
 message(STATUS "Project version: ${version}")
 
+# helper for LSP users
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
 # Note : project name is {project} and python module name is also {project}
 set(module_name _${CMAKE_PROJECT_NAME}) # target name
 set(pybind_module_name ${CMAKE_PROJECT_NAME}) # name of submodule for python bindings
@@ -26,6 +29,7 @@ option(TEST "Enable tests" ON)
 option(COVERAGE "Enable coverage" OFF)
 option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF)
 
+
 ##############################################
 # Import utils CMakeLists
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
@@ -36,22 +40,28 @@ endif()
 
 ##############################################
 # Find system dependencies
-Include(FetchContent)
-
 set(FMT_VERSION 10.2.1)
-message(STATUS "Retrieving fmt ${FMT_VERSION} from git")
-FetchContent_Declare(
-    fmt
-    GIT_REPOSITORY https://github.com/fmtlib/fmt.git
-    GIT_TAG        ${FMT_VERSION} # or a later release
-)
 
-set(FMT_SYSTEM_HEADERS ON)
-FetchContent_MakeAvailable(fmt)
-set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
+find_package(fmt ${FMT_VERSION} QUIET)
 
-find_package(Threads REQUIRED)
+if(NOT fmt_FOUND)
+    message(STATUS "fmt not found in system, retrieving from git")
+    Include(FetchContent)
 
+    FetchContent_Declare(
+        fmt
+        GIT_REPOSITORY https://github.com/fmtlib/fmt.git
+        GIT_TAG        ${FMT_VERSION}
+    )
+
+    set(FMT_SYSTEM_HEADERS ON)
+    FetchContent_MakeAvailable(fmt)
+    set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
+else()
+    message(STATUS "Found system fmt version ${fmt_VERSION}")
+endif()
+
+find_package(Threads REQUIRED)
 ##############################################
 # Create target and set properties
 
diff --git a/MANIFEST.in b/MANIFEST.in
index ae5b7c7c2e07eef97ef72bdb79cca94f8124981b..ed911dd75b59b65b8bfa023584aae8585de6325b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README.md LICENCE
+include README.md LICENSE
 recursive-include aidge_core *.py 
 recursive-exclude aidge_core/unit_tests *.py
 
diff --git a/README.md b/README.md
index fe8fd5a4252054c730be8e948d0d2e415c009d47..5fa6b938c6e333ac2c2292fc931749b3fd953b4f 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ You can find here the C++ code of the Core library of Aidge.
 
 ## Pip installation
 
-To install aidge_core using pip, run the following command in your python environnement :
+To install aidge_core using pip, run the following command in your python environment :
 ``` bash
 pip install . -v
 ```
@@ -23,7 +23,7 @@ pip install . -v
 
 To setup aidge_core using pip in development (or editable mode), use the `--no-build-isolation -e` options to pip.
 
-For instance run the following command in your python environnement for a typical setup :
+For instance run the following command in your python environment for a typical setup :
 ``` bash
 export AIDGE_BUILD_TEST=ON              # enable C++ unit tests
 export AIDGE_PYTHON_BUILD_TYPE=         # default flags (no debug info but fastest build time)
@@ -85,7 +85,7 @@ make all install
 |   Option   | Value type | Description |
 |:----------:|:----------:|:-----------:|
 | *-DCMAKE_INSTALL_PREFIX:PATH* | ``str``  | Path to the install folder |
-| *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode, ``Release`` compile with highest optimisations or "" (empty) , default= ``Release`` |
+| *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode, ``Release`` compile with highest optimizations or "" (empty) , default= ``Release`` |
 | *-DWERROR*                    | ``bool`` | If ``ON`` show warning as error during compilation phase, default=``OFF`` |
 | *-DTEST*                      | ``bool`` | If ``ON`` build C++ unit tests, default=``ON`` |
 | *-DPYBIND*                    | ``bool`` | If ``ON`` activate python binding, default=``OFF`` |
diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index 652f485a9d3de6869b55613549172d49913e8509..32042125ed6ecb1d935e240837afe6516706dbcb 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -7,8 +7,10 @@ http://www.eclipse.org/legal/epl-2.0.
 
 SPDX-License-Identifier: EPL-2.0
 """
-from .aidge_core import * # import so generated by PyBind
-from .export_utils import ExportNode, generate_file, generate_str
-from .aidge_export_aidge import *
-from . import utils
+from aidge_core.aidge_core import * # import so generated by PyBind
+import aidge_core.export_utils
+import aidge_core.utils
+from aidge_core.aidge_export_aidge import serialize_to_cpp
+from aidge_core.show_graphview import gview_to_json
+from aidge_core.mem_info import *
 from ._version import *
diff --git a/aidge_core/aidge_export_aidge/__init__.py b/aidge_core/aidge_export_aidge/__init__.py
index c5d6f96b3300c0e86147baac30d7cae8a3a0b798..9f042cdbcfff071dabc9a31817bf8e2e95c36ad9 100644
--- a/aidge_core/aidge_export_aidge/__init__.py
+++ b/aidge_core/aidge_export_aidge/__init__.py
@@ -5,4 +5,4 @@ FILE = Path(__file__).resolve()
 ROOT_EXPORT = FILE.parents[0]
 
 from .operator_export import *
-from .export import export
+from .export import serialize_to_cpp
diff --git a/aidge_core/aidge_export_aidge/export.py b/aidge_core/aidge_export_aidge/export.py
index b0e859b71e77bb03b95acb6ca75dcf09a8af0722..f5d8c8c7ca6f3fa7c7ef19b1aef3987c20acd1f7 100644
--- a/aidge_core/aidge_export_aidge/export.py
+++ b/aidge_core/aidge_export_aidge/export.py
@@ -2,15 +2,14 @@ import aidge_core
 import shutil
 import os
 from pathlib import Path
-from .utils import supported_operators, OPERATORS_REGISTRY
-from . import ROOT_EXPORT
-
-
-from aidge_core import ExportNode, generate_file
 
+import aidge_core.export_utils
+from . import ROOT_EXPORT
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 
+from aidge_core.export_utils  import generate_file
 
-def export(export_folder: str,
+def serialize_to_cpp(export_folder: str,
            graph_view: aidge_core.GraphView,
            enable_python_binding: bool = True,
            ):
@@ -54,12 +53,11 @@ def export(export_folder: str,
     ### Generating an export for each nodes and dnn file ###
     list_configs = []  # List of headers to include in dnn.cpp to access attribute and parameters
     list_actions = []  # List of string to construct graph
-    set_operator = set()
+    list_operators = [] # List of operator types used (to be made unique later)
     # Queue of Aidge nodes to explore, guarantee a topological exploration of the graph
     open_nodes = list(graph_view.get_input_nodes())
     # List of Aidge nodes already explored
     closed_nodes = []
-
     while open_nodes:
         node = open_nodes.pop(0)
         if node in closed_nodes:
@@ -80,25 +78,37 @@ def export(export_folder: str,
             continue
         # Next nodes to treat are children of current node
         open_nodes += list(node.get_children())
+        node.get_operator().set_backend(ExportSerialize._name)
+        op_impl = node.get_operator().get_impl()
+        if op_impl is None:
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
+        if not isinstance(op_impl, ExportSerialize):
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
+        required_specs = op_impl.get_required_spec()
+        specs = op_impl.get_best_match(required_specs)
+        export_node = op_impl.get_export_node(specs)
+        if export_node is None:
+            raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
+        op = export_node(
+            node, None)
+
 
-        if node.type() in supported_operators():
-            set_operator.add(node.type())
-            op = OPERATORS_REGISTRY[node.type()](node)
+        list_operators.append(node.type())
 
-            # TODO: list_configs and list_actions don't need to be passed by argument
-            # Export the configuration
-            list_configs = op.export(export_folder_path, list_configs)
+        # TODO: list_configs and list_actions don't need to be passed by argument
+        # Export the configuration
+        list_configs += op.export(export_folder_path)
 
-            # Add forward kernel
-            list_actions = op.forward(list_actions)
-        else:
-            raise RuntimeError(f"Operator: {node.type()} is not supported")
+        # Add forward kernel
+        list_actions += op.forward()
         closed_nodes.append(node)
+    list_operators = list(dict.fromkeys(list_operators)) # make unique
+
     # Generate full dnn.cpp
-    aidge_core.generate_file(
+    aidge_core.export_utils.generate_file(
         export_folder_path / "src/dnn.cpp",
         ROOT_EXPORT / "templates/dnn.jinja",
         headers=list_configs,
-        operators=set_operator,
+        operators=list_operators,
         actions=list_actions,
     )
diff --git a/aidge_core/aidge_export_aidge/operator_export/add.py b/aidge_core/aidge_export_aidge/operator_export/add.py
new file mode 100644
index 0000000000000000000000000000000000000000..4eb7c3e37f0d63388a5bfe8600f184b9da2ffc49
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/add.py
@@ -0,0 +1,14 @@
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+@ExportSerialize.register("Add", ImplSpec(IOSpec(dtype.any)))
+class Add(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = ""
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/add.jinja")
+        self.include_list = ["aidge/operator/Add.hpp"]
+        self.kernels_to_copy = []
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv.py b/aidge_core/aidge_export_aidge/operator_export/conv.py
index fb7092fb18982a3cc3f11a1ca47394ce2f77d0b6..ea23c1551787c8579549d54a1fe7396995eb1bff 100644
--- a/aidge_core/aidge_export_aidge/operator_export/conv.py
+++ b/aidge_core/aidge_export_aidge/operator_export/conv.py
@@ -1,31 +1,17 @@
-from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import ExportNode, generate_file, generate_str
-from pathlib import Path
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("Conv")
-class Conv(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-    def export(self, export_folder:Path, list_configs:list):
-        include_path = f"attributes/{self.name}.hpp"
-        filepath = export_folder / f"include/{include_path}"
-
-        generate_file(
-            filepath,
-            ROOT_EXPORT / "templates/attributes/conv.jinja",
-            name=self.name,
-            **self.attributes
-        )
-        list_configs.append(include_path)
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT /"templates/graph_ctor/conv.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+@ExportSerialize.register(["Conv1D", "Conv2D"], ImplSpec(IOSpec(dtype.any)))
+class Conv(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/conv.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT /"templates/graph_ctor/conv.jinja")
+        self.include_list = ["aidge/operator/Conv.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py b/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e04f8aac17da5662a3ed08bc627969dbb3a9c13
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/conv_depth_wise.py
@@ -0,0 +1,17 @@
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+@ExportSerialize.register(["ConvDepthWise1D", "ConvDepthWise2D"], ImplSpec(IOSpec(dtype.any)))
+class ConvDepthWise(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/conv_depth_wise.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT /"templates/graph_ctor/conv_depth_wise.jinja")
+        self.include_list = ["aidge/operator/ConvDepthWise.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/fc.py b/aidge_core/aidge_export_aidge/operator_export/fc.py
index fcd528528707dc6eec917790b46e509c2984fa66..4f964a9942600d46740b570975a218b4c2e7aabd 100644
--- a/aidge_core/aidge_export_aidge/operator_export/fc.py
+++ b/aidge_core/aidge_export_aidge/operator_export/fc.py
@@ -1,37 +1,18 @@
-from aidge_core.aidge_export_aidge.utils import operator_register,parse_node_input
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import ExportNode, generate_file, generate_str
-from pathlib import Path
-
-@operator_register("FC")
-class FC(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-
-    def export(self, export_folder:Path, list_configs:list):
-
-
-        include_path = f"attributes/{self.name}.hpp"
-        filepath = export_folder / f"include/{include_path}"
-
-
-        generate_file(
-            filepath,
-            ROOT_EXPORT / "templates/attributes/fc.jinja",
-            name=self.name,
-            InChannels=self.inputs_dims[1][1],
-            OutChannels=self.operator.out_channels(),
-            **self.attributes
-        )
-        list_configs.append(include_path)
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/fc.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+
+@ExportSerialize.register("FC", ImplSpec(IOSpec(dtype.any)))
+class FC(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/fc.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/fc.jinja")
+        self.include_list = ["aidge/operator/FC.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
index 0c63e71b423b90f62536cafd25c61101e76e0562..6d9c7998fb90153bfdfd2898c1dfcfb1ad730f20 100644
--- a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
+++ b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
@@ -1,32 +1,17 @@
-from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import ExportNode, generate_file, generate_str
-from pathlib import Path
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("MaxPooling")
-class MaxPooling(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-
-    def export(self, export_folder:Path, list_configs:list):
-        include_path = f"attributes/{self.name}.hpp"
-        filepath = export_folder / f"include/{include_path}"
-
-        generate_file(
-            filepath,
-            ROOT_EXPORT / "templates/attributes/maxpooling.jinja",
-            name=self.name,
-            **self.attributes
-        )
-        list_configs.append(include_path)
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+@ExportSerialize.register(["MaxPooling1D", "MaxPooling2D", "MaxPooling3D"], ImplSpec(IOSpec(dtype.any)))
+class MaxPooling(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/maxpooling.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja")
+        self.include_list = ["aidge/operator/MaxPooling.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/pad.py b/aidge_core/aidge_export_aidge/operator_export/pad.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d6869de07b985169399697e95b6b719f658c911
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/pad.py
@@ -0,0 +1,17 @@
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
+
+@ExportSerialize.register(["Pad1D", "Pad2D"], ImplSpec(IOSpec(dtype.any)))
+class Pad(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT_EXPORT / "templates/attributes/pad.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT /"templates/graph_ctor/pad.jinja")
+        self.include_list = ["aidge/operator/Pad.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py
index d082e9726b7ca33fbe6f4692bf7b55930b69cb9d..02f2f1f39c6797d7f92a5938d6dbe8853079a624 100644
--- a/aidge_core/aidge_export_aidge/operator_export/producer.py
+++ b/aidge_core/aidge_export_aidge/operator_export/producer.py
@@ -1,65 +1,30 @@
-from aidge_core.aidge_export_aidge.utils import operator_register
-from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from aidge_core import dtype, ExportNode, generate_file, generate_str
 import numpy as np
-from pathlib import Path
-
-# Convert aidge datatype to C++ type
-datatype_converter = {
-    dtype.float64 : "double",
-    dtype.float32 : "float",
-    dtype.float16 : "half_float::half",
-    dtype.int8    : "int8_t",
-    dtype.int16   : "int16_t",
-    dtype.int32   : "int32_t",
-    dtype.int64   : "int64_t",
-    dtype.uint8   : "uint8_t",
-    dtype.uint16  : "uint16_t",
-    dtype.uint32  : "uint32_t",
-    dtype.uint64  : "uint64_t"
-}
 
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("Producer")
-class Producer(ExportNode):
+@ExportSerialize.register("Producer", ImplSpec(IOSpec(dtype.any)))
+class Producer(ExportNodeCpp):
     """
     If there is a standardization of the export operators
     then this class should be just a inheritance of ProducerCPP
     """
-    def __init__(self, node):
-        super().__init__(node)
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
         child, in_idx = self.node.output(0)[0]
-        self.tensor_name = f"{child.name()}_{in_idx}"
-        self.values = np.array(self.operator.get_output(0))
-
-    def export(self, export_folder:Path, list_configs:list):
-        assert(len(self.node.output(0)) == 1)
 
-        include_path = f"parameters/{self.tensor_name}.hpp"
-        filepath = export_folder / f"include/{include_path}"
+        self.values = np.array(self.operator.get_output(0))
 
-        aidge_tensor = self.operator.get_output(0)
-        aidge_type = aidge_tensor.dtype()
-        if aidge_type in datatype_converter:
-            datatype = datatype_converter[aidge_type]
-        else:
-            raise RuntimeError(f"No conversion found for data type {aidge_type}.")
-        generate_file(
-            filepath,
-            ROOT_EXPORT / "templates/parameter.jinja",
-            dims = aidge_tensor.dims(),
-            data_t = datatype, # TODO : get data from producer
-            name = self.tensor_name,
-            values = str(aidge_tensor)
-        )
-        list_configs.append(include_path)
-        return list_configs
+        self.config_template = str(
+            ROOT_EXPORT / "templates/parameter.jinja")
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/producer.jinja")
+        self.attributes["tensor_name"] = f"{child.name()}_{in_idx}"
+        self.attributes["values"] = str(self.operator.get_output(0))
+        self.include_list = ["aidge/operator/Producer.hpp"]
+        self.kernels_to_copy = []
+        self.config_path = "include/attributes"
+        self.config_extension = "hpp"
 
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/producer.jinja",
-            name=self.name,
-            tensor_name=self.tensor_name,
-            **self.attributes
-        ))
-        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/relu.py b/aidge_core/aidge_export_aidge/operator_export/relu.py
index c0f4f6afdc35737a8967f51c1859bda0c9773f88..b8398e30504b534fba755e6c613d361d873e09cd 100644
--- a/aidge_core/aidge_export_aidge/operator_export/relu.py
+++ b/aidge_core/aidge_export_aidge/operator_export/relu.py
@@ -1,21 +1,14 @@
-from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
-from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from pathlib import Path
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("ReLU")
-class ReLU(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-    def export(self, export_folder:Path, list_configs:list):
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/relu.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+@ExportSerialize.register("ReLU", ImplSpec(IOSpec(dtype.any)))
+class ReLU(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = ""
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/relu.jinja")
+        self.include_list = ["aidge/operator/ReLU.hpp"]
+        self.kernels_to_copy = []
diff --git a/aidge_core/aidge_export_aidge/operator_export/sub.py b/aidge_core/aidge_export_aidge/operator_export/sub.py
index efcdd0924fbcf6944b0fb95a967e1a3e16ccc3c5..01b68b70f4cfcf3b3899202269106c58cb7e54a1 100644
--- a/aidge_core/aidge_export_aidge/operator_export/sub.py
+++ b/aidge_core/aidge_export_aidge/operator_export/sub.py
@@ -1,21 +1,14 @@
-from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
-from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge.registry import ExportSerialize
 from aidge_core.aidge_export_aidge import ROOT_EXPORT
-from pathlib import Path
+from aidge_core.export_utils import ExportNodeCpp
+from aidge_core import ImplSpec, IOSpec, dtype
 
-@operator_register("Sub")
-class Sub(ExportNode):
-    def __init__(self, node):
-        super().__init__(node)
-
-    def export(self, export_folder:Path, list_configs:list):
-        return list_configs
-
-    def forward(self, list_actions:list):
-        list_actions.append(generate_str(
-            ROOT_EXPORT / "templates/graph_ctor/sub.jinja",
-            name=self.name,
-            inputs=parse_node_input(self.node.inputs()),
-            **self.attributes
-        ))
-        return list_actions
+@ExportSerialize.register("Sub", ImplSpec(IOSpec(dtype.any)))
+class Sub(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = ""
+        self.forward_template = str(
+            ROOT_EXPORT / "templates/graph_ctor/sub.jinja")
+        self.include_list = ["aidge/operator/Sub.hpp"]
+        self.kernels_to_copy = []
diff --git a/aidge_core/aidge_export_aidge/registry.py b/aidge_core/aidge_export_aidge/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfed0822ff2d8351af60cd211c190a802fe4a674
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/registry.py
@@ -0,0 +1,7 @@
+from aidge_core.export_utils import ExportLib
+from . import ROOT_EXPORT
+import aidge_core
+
+
+class ExportSerialize(ExportLib):
+    _name="export_serialize"
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
index 48d07e8db8d5fb116148e9d41100fffa01fcf622..58c52abec545f8e62e21dfb35c9f4a5d652f681e 100644
--- a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
@@ -1,17 +1,17 @@
 #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
 #define EXPORT_ATTRIBUTES_{{name|upper}}_H
 
-#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
-#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+#define _{{name|upper}}_IN_CHANNELS  {{in_chan[0]}}
+#define _{{name|upper}}_OUT_CHANNELS {{out_chan[0]}}
 
-{% for i in range(KernelDims|length) %}
-#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{% for i in range(kernel_dims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
 {%- endfor %}
-{% for i in range(StrideDims|length) %}
-#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{% for i in range(stride_dims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
 {%- endfor %}
-{% for i in range(DilationDims|length) %}
-#define _{{name|upper}}_DILATION_{{i}} {{DilationDims[i]}}
+{% for i in range(dilation_dims|length) %}
+#define _{{name|upper}}_DILATION_{{i}} {{dilation_dims[i]}}
 {%- endfor %}
 
 #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..7c2ffff448bb3d028f378ba6fc124abdad6e9ad7
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv_depth_wise.jinja
@@ -0,0 +1,16 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_CHANNELS {{out_chan[0]}}
+
+{% for i in range(kernel_dims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
+{%- endfor %}
+{% for i in range(stride_dims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
+{%- endfor %}
+{% for i in range(dilation_dims|length) %}
+#define _{{name|upper}}_DILATION_{{i}} {{dilation_dims[i]}}
+{%- endfor %}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
index e292f9b611978877c47b15e91f926f30d27a1cc5..32f4d00515b1ced28b5e49889c09759f0f0dd0db 100644
--- a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
+++ b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
@@ -1,7 +1,7 @@
 #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
 #define EXPORT_ATTRIBUTES_{{name|upper}}_H
 
-#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
-#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+#define _{{name|upper}}_IN_CHANNELS  {{in_chan[0]}}
+#define _{{name|upper}}_OUT_CHANNELS {{out_chan[0]}}
 
 #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
index d258f580e6ff9c523a87b834fdccf2f3b14fb133..96de14b01167a2f4267343594ded8df6f4b5576d 100644
--- a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
+++ b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
@@ -1,13 +1,13 @@
 #ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
 #define EXPORT_ATTRIBUTES_{{name|upper}}_H
 
-{% for i in range(KernelDims|length) %}
-#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{% for i in range(kernel_dims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{kernel_dims[i]}}
 {%- endfor %}
-{% for i in range(StrideDims|length) %}
-#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{% for i in range(stride_dims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{stride_dims[i]}}
 {%- endfor %}
 
-#define _{{name|upper}}_CEIL_MODE {{CeilMode|int}}
+#define _{{name|upper}}_CEIL_MODE {{ceil_mode|int}}
 
 #endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja b/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8fb76a6b220c643da9a3f02d38f1757fed0c1b86
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/pad.jinja
@@ -0,0 +1,12 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+{%- set half_length = (begin_end_borders|length / 2)|int -%}
+{% for i in range(half_length) %}
+#define _{{name|upper}}_BEGIN_BORDERS_{{i}}  {{begin_end_borders[2*i]}}
+#define _{{name|upper}}_END_BORDERS_{{i}}  {{begin_end_borders[2*i+1]}}
+{%- endfor %}
+#define _{{name|upper}}_BORDER_TYPE {{border_type|int}}
+#define _{{name|upper}}_BORDER_VALUE {{border_value}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/dnn.jinja b/aidge_core/aidge_export_aidge/templates/dnn.jinja
index 5da46b2d8a439a359dfb1c7ec8ebc18e8d516767..bb8faff4a800d9676317f5c0301827e21d19df6d 100644
--- a/aidge_core/aidge_export_aidge/templates/dnn.jinja
+++ b/aidge_core/aidge_export_aidge/templates/dnn.jinja
@@ -17,7 +17,7 @@
 
 /*** OPERATOR ATTRIBUTES & PARAMETERS ***/
 {%- for header in headers %}
-#include "{{ header }}"
+#include "{{ header | replace('include/', '') }}"
 {%- endfor %}
 
 /*** HEADER ***/
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
index 8e841ea2a10c71b884736dcbd7cfd03b52c5ad4f..d9f59a94663692b00594f0ecf5f452cc8e2132ca 100644
--- a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
@@ -1,7 +1,7 @@
 {# NOTE: Trying a shorter notation like {%- for input in inputs if input[0] %}
 will mess up loop.index as the input set up at None will not increment ! #}
-{%- for input in inputs %}
-{%- if input[0] %}
-{{input[0]}}->addChild({{name}}, {{input[1]}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #}
+{%- for input_node, out_id in node.inputs() %}
+{%- if input_node %}
+{{input_node.name()}}->addChild({{name}}, {{out_id}}, {{loop.index - 1}}); {# NOTE: loop.index begin at 1 #}
 {%- endif %}
 {%- endfor %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..2bfaf93646fc24f6a44ac170a8c2c932f5daf0fc
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/add.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Add(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
index a805f8065e87244bf0546ca42d294b86f144a26d..bd4eed2d39f0872759f568b6cc54b8abe3792db7 100644
--- a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
@@ -5,18 +5,18 @@ std::shared_ptr<Aidge::Node> {{name}} =
             _{{name|upper}}_IN_CHANNELS,
             _{{name|upper}}_OUT_CHANNELS,
             {
-            {%- for i in range(KernelDims|length) -%}
+            {%- for i in range(kernel_dims|length) -%}
                 _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             },
             "{{name}}",
             {
-            {%- for i in range(StrideDims|length) -%}
+            {%- for i in range(stride_dims|length) -%}
                 _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             },
             {
-            {%- for i in range(DilationDims|length) -%}
+            {%- for i in range(dilation_dims|length) -%}
                 _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             }
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..f7e1a85bbc084631ea4d26bfadd106bc2a5a69fe
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv_depth_wise.jinja
@@ -0,0 +1,25 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::ConvDepthWise(
+            _{{name|upper}}_CHANNELS,
+            {
+            {%- for i in range(kernel_dims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(stride_dims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            {
+            {%- for i in range(dilation_dims|length) -%}
+                _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            }
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
index c6587c128509712e1a8e903e7484476548e9347d..ceb4784a0942e91b44fe6956833dafda26a2e314 100644
--- a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
@@ -3,13 +3,13 @@
 std::shared_ptr<Aidge::Node> {{name}} =
         Aidge::MaxPooling(
             {
-            {%- for i in range(KernelDims|length) -%}
+            {%- for i in range(kernel_dims|length) -%}
                 _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             },
             "{{name}}",
             {
-            {%- for i in range(StrideDims|length) -%}
+            {%- for i in range(stride_dims|length) -%}
                 _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
             {%- endfor -%}
             },
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..a7bd866207be9f048ae431922710b975268a6155
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/pad.jinja
@@ -0,0 +1,17 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+{%- set half_length = (begin_end_borders|length / 2)|int -%}
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Pad<{{half_length}}>(
+            {
+            {%- for i in range(half_length) -%}
+                _{{name|upper}}_BEGIN_BORDERS_{{i}},  _{{name|upper}}_END_BORDERS_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+             static_cast<Aidge::PadBorderType>(_{{name|upper}}_BORDER_TYPE),
+             _{{name|upper}}_BORDER_VALUE
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/parameter.jinja b/aidge_core/aidge_export_aidge/templates/parameter.jinja
index 11a407cc89f72f24167871a594decc6d90ab489d..0ff9634d98c57ad84dabc30625e660ca404612f6 100644
--- a/aidge_core/aidge_export_aidge/templates/parameter.jinja
+++ b/aidge_core/aidge_export_aidge/templates/parameter.jinja
@@ -4,7 +4,7 @@
 #include <aidge/data/Tensor.hpp>
 #include <memory>
 
-std::shared_ptr<Aidge::Tensor> {{name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{dims|length}}D<{{data_t}}, {{ dims|join(", ") }}> {
+std::shared_ptr<Aidge::Tensor> {{tensor_name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{out_dims[0]|length}}D<{{out_cdtype[0]}}, {{ out_dims[0]|join(", ") }}> {
 {{ values }}
 });
 
diff --git a/aidge_core/aidge_export_aidge/utils/__init__.py b/aidge_core/aidge_export_aidge/utils.py
similarity index 92%
rename from aidge_core/aidge_export_aidge/utils/__init__.py
rename to aidge_core/aidge_export_aidge/utils.py
index ecdf2aec2692a48e108d5f4ad05ed05803319525..31a0d6ee77eafb0883f2882bfaae999307a0fa4b 100644
--- a/aidge_core/aidge_export_aidge/utils/__init__.py
+++ b/aidge_core/aidge_export_aidge/utils.py
@@ -1,5 +1,3 @@
-from .operator_registry import *
-
 def parse_node_input(node_inputs: list) -> list:
     """Parse node intputs in order to adapt the list for Jinja.
 
diff --git a/aidge_core/aidge_export_aidge/utils/operator_registry.py b/aidge_core/aidge_export_aidge/utils/operator_registry.py
deleted file mode 100644
index dd6fbaaceeba9c2125b38354eca9cc116acd29b1..0000000000000000000000000000000000000000
--- a/aidge_core/aidge_export_aidge/utils/operator_registry.py
+++ /dev/null
@@ -1,18 +0,0 @@
-OPERATORS_REGISTRY = {}
-
-def operator_register(*args):
-
-    key_list = [arg for arg in args]
-
-    def decorator(operator):
-        def wrapper(*args, **kwargs):
-            return operator(*args, **kwargs)
-
-        for key in key_list:
-            OPERATORS_REGISTRY[key] = operator
-
-        return wrapper
-    return decorator
-
-def supported_operators():
-    return list(OPERATORS_REGISTRY.keys())
diff --git a/aidge_core/export_utils/__init__.py b/aidge_core/export_utils/__init__.py
index 6fc846d93301f45b0635cd9b2fabae65fa7be8ab..72472eee0b6850ea76c70253e4fa953ac5785f84 100644
--- a/aidge_core/export_utils/__init__.py
+++ b/aidge_core/export_utils/__init__.py
@@ -1,2 +1,6 @@
-from .node_export import *
-from .code_generation import *
+from .node_export import ExportNode, ExportNodeCpp
+from .code_generation import generate_file, generate_str, copy_file
+from .export_registry import ExportLib
+from .scheduler_export import scheduler_export
+from .tensor_export import tensor_to_c, generate_input_file
+from .generate_main import generate_main_cpp, generate_main_compare_cpp
diff --git a/aidge_core/export_utils/code_generation.py b/aidge_core/export_utils/code_generation.py
index a02fc0966702cec7a2cbe33f8411bb71e3035e90..4f0f4634dd8ac09c8c0a86506dc52d420889b22a 100644
--- a/aidge_core/export_utils/code_generation.py
+++ b/aidge_core/export_utils/code_generation.py
@@ -1,7 +1,8 @@
 from pathlib import Path
-from jinja2 import Environment, FileSystemLoader
+from jinja2 import Environment, FileSystemLoader, StrictUndefined
 from typing import Union
-
+import os
+import shutil
 
 def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], **kwargs) -> None:
     """Generate a file at `file_path` using the jinja template located at `file_path`.
@@ -18,16 +19,14 @@ def generate_file(file_path: Union[Path, str], template_path: Union[Path, str],
         file_path = Path(file_path)
     if isinstance(template_path, str):
         template_path = Path(template_path)
+    if not template_path.exists():
+        raise ValueError(f"Path to template {template_path} is not valid !")
     # Make dir
     file_path.parent.mkdir(parents=True, exist_ok=True)
 
-    # Select template
-    template = Environment(loader=FileSystemLoader(
-        template_path.parent)).get_template(template_path.name)
-
     # Generate file
     with open(file_path, mode="w", encoding="utf-8") as file:
-        file.write(template.render(kwargs))
+        file.write(generate_str(template_path, **kwargs))
 
 
 def generate_str(template_path: Union[Path, str], **kwargs) -> str:
@@ -43,4 +42,12 @@ def generate_str(template_path: Union[Path, str], **kwargs) -> str:
     if isinstance(template_path, str):
         template_path = Path(template_path)
     return Environment(loader=FileSystemLoader(
-        template_path.parent)).get_template(template_path.name).render(kwargs)
+        template_path.parent), undefined=StrictUndefined, keep_trailing_newline=True).get_template(template_path.name).render(kwargs)
+
+def copy_file(filename, dst_folder):
+
+    # If directory doesn't exist, create it
+    if not os.path.exists(dst_folder):
+        os.makedirs(dst_folder)
+
+    shutil.copy(filename, dst_folder)
diff --git a/aidge_core/export_utils/data_conversion.py b/aidge_core/export_utils/data_conversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..401fc39f2a70245a67719699b5f0cdc61108e0cf
--- /dev/null
+++ b/aidge_core/export_utils/data_conversion.py
@@ -0,0 +1,30 @@
+import numpy as np
+import aidge_core
+
+
+datatype_converter_aide2c = {
+    aidge_core.dtype.float64 : "double",
+    aidge_core.dtype.float32 : "float",
+    aidge_core.dtype.float16 : "half_float::half",
+    aidge_core.dtype.int8    : "int8_t",
+    aidge_core.dtype.int16   : "int16_t",
+    aidge_core.dtype.int32   : "int32_t",
+    aidge_core.dtype.int64   : "int64_t",
+    aidge_core.dtype.uint8   : "uint8_t",
+    aidge_core.dtype.uint16  : "uint16_t",
+    aidge_core.dtype.uint32  : "uint32_t",
+    aidge_core.dtype.uint64  : "uint64_t"
+}
+
+def aidge2c(datatype):
+    """Convert an Aidge datatype to a C type
+
+    :param datatype: Aidge datatype to convert
+    :type datatype: :py:object:`aidge_core.DataType`
+    :return: A string representing the C type
+    :rtype: string
+    """
+    if datatype in datatype_converter_aide2c:
+        return datatype_converter_aide2c[datatype]
+    else:
+        raise ValueError(f"Unsupported {datatype} aidge datatype")
diff --git a/aidge_core/export_utils/export_registry.py b/aidge_core/export_utils/export_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5b6b2098cd760c4d425b96caf7b41cc8e82c46e
--- /dev/null
+++ b/aidge_core/export_utils/export_registry.py
@@ -0,0 +1,229 @@
+from typing import Dict, List
+import aidge_core
+from aidge_core.export_utils import ExportNode
+
+
+class classproperty:
+    """Helper class to define class-level properties.
+
+    Equivalent to applying both the ``@property`` and ``@classmethod`` decorators,
+    allowing methods to be accessed as class properties. These two decorators
+    are otherwise incompatible prior to Python 3.12.
+
+    See discussion: https://discuss.python.org/t/add-a-supported-read-only-classproperty-decorator-in-the-stdlib/18090/12
+    """
+
+    def __init__(self, fget):
+        """
+        :param fget: Function to be wrapped as a class property.
+        :type fget: Callable
+        """
+        self.fget = fget
+
+    def __get__(self, instance, owner):
+        return self.fget(owner)
+
+
+class ExportLib(aidge_core.OperatorImpl):
+    """Aidge export library that manages a registry for operators and static files.
+
+    This class provides a structure for registering different operator types and
+    export nodes, facilitating access and management of these elements.
+
+    :ivar _name: The name of the export library, used for namespacing.
+    :ivar static_files: A dictionary mapping paths of static files to their target locations relative to the export root.
+    """
+    # PUBLIC
+    # Lib name useful ?
+    # Help define namespace
+    _name: str = None
+    # key: Path where static file is
+    # Value: Path where to copy the file relative to the export root
+    static_files: Dict[str, str] = {}
+    # Main memory section
+    mem_section = None
+    # Custom forward generation jinja file
+    forward_template: str = None
+    forward_header_template: str = None
+    # PRIVATE
+    # Registry of exportNode, class level dictionary, shared across all ExportLib
+    _cls_export_node_registry = {}
+
+    def __init__(self, operator):
+        super(ExportLib, self).__init__(operator, self._name)
+        if self._name is None:
+            raise ValueError(f"ExportLib {self.__class__.__name__} does not define the attribute ``_name``.")
+
+        # TODO: This is a patch, setBackend method is used to set an ExportLib as an Implementation.
+        # But it also set the backend of the Tensor and we don't define a backend for Tensor.
+        # The following code block associate to the export backend the "cpu" implementation.
+        # This is tracked by the issue :
+        #         https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/issues/178
+
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.float32],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float32]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.float16],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.float16]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.int8],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int8]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.int16],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int16]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.int32],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int32]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.int64],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.int64]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.uint8],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint8]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.uint16],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint16]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.uint32],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint32]))
+        aidge_core.register_Tensor([self._name, aidge_core.dtype.uint64],
+                                    aidge_core.get_key_value_Tensor(["cpu", aidge_core.dtype.uint64]))
+
+    @classproperty
+    def _export_node_registry(cls) -> Dict[str, List['ExportNode']]:
+        """Define as a class property to access the registry at class level while keeping it at instance level.
+
+        :return: The export node registry specific to the class
+        :rtype: Dict[str, List[ExportNode]]
+        """
+        return cls._cls_export_node_registry.setdefault(cls, {})
+
+    def get_available_impl_specs(self) -> List[aidge_core.ImplSpec]:
+        """Override the virtual OperatorImpl method, in order to provide available
+        implementation specifications.
+
+        :return: List of implementation specification available for the type of operator.
+        :rtype: List[aidge_core.ImplSpec]
+        """
+        if self.get_operator().type() in self._export_node_registry:
+            spec_vec = [i for i, _ in self._export_node_registry[self.get_operator().type()]]
+            return spec_vec
+        else:
+            return []
+
+    def get_export_node(self, spec: aidge_core.ImplSpec) -> ExportNode:
+        """Given an :py:class:`aidge_core.ImplSpec`, return the ExportNode that is the closest match.
+
+        :param spec: Implementation specification to match
+        :type spec: :py:class:`aidge_core.ImplSpec`
+        :return: The class ExportNode that is the closest match
+        :rtype: ExportNode
+        """
+        for registered_spec, export_node in self._export_node_registry[self.get_operator().type()]:
+            if registered_spec == spec:
+
+                return export_node
+        return None
+
+    @classmethod
+    def register(cls, op_type, spec):
+        """Decorator to register an operator implementation for a specified operator type.
+
+        Registers an operator under a given operator type and specification,
+        adding it to the export library registry. This method supports both
+        single operator types (str) and lists of types (List[str]).
+
+        :param op_type: The operator type(s) to register.
+        :type op_type: Union[str, List[str]]
+        :param spec: Implementation specification for the operator.
+        :type spec: :py:class:``aidge_core.ImplSpec``
+        :return: A wrapper class that initializes the registered operator.
+        :rtype: Callable
+        """
+        def decorator(operator):
+            class Wrapper(operator):
+                def __init__(self, *args, **kwargs):
+                    return operator(*args, **kwargs)
+            type_list = []
+            if isinstance(op_type, list):
+                type_list = op_type
+            elif isinstance(op_type, str):
+                type_list = [op_type]
+            else:
+                raise TypeError(f"Argument 'op_type' of register method should be of type 'List[str]' or 'str', got {type(op_type)}")
+
+            for type_name in type_list:
+                if (type_name not in cls._export_node_registry):
+                    cls._export_node_registry[type_name] = []
+                cls._export_node_registry[type_name].append((spec, operator))
+
+                register_func: str = f"register_{type_name}Op"
+                # If no register function exists for this type, the user is likely trying to register a MetaOperator
+                if register_func not in dir(aidge_core):
+                    raise ValueError(f"Operator of type: {type_name} is not declared as registrable!\nHint: If you try to register a MetaOperator use register_metaop instead.")
+                else:
+                    # Equivalent to aidge_core.register_ConvOp("ExportLibX", ExportLibX)
+                    aidge_core.__getattribute__(register_func)(cls._name, cls)
+            return Wrapper
+        return decorator
+
+    @classmethod
+    def register_metaop(cls, op_type, spec):
+        """Decorator to register a MetaOperator with the export library.
+
+        Registers a MetaOperator under a given operator type and specification. This decorator
+        is intended for operator types that are grouped as meta operators.
+
+        :param op_type: Operator type(s) to register as a ``MetaOperator``.
+        :type op_type: Union[str, List[str]]
+        :param spec: Implementation specification for the MetaOperator.
+        :type spec: aidge_core.ImplSpec
+        :return: A wrapper class that initializes the registered MetaOperator.
+        :rtype: Callable
+        """
+        def decorator(operator):
+            class Wrapper(operator):
+                def __init__(self, *args, **kwargs):
+                    return operator(*args, **kwargs)
+            type_list = []
+            if isinstance(op_type, list):
+                type_list = op_type
+            elif isinstance(op_type, str):
+                type_list = [op_type]
+            else:
+                raise TypeError(f"Argument 'op_type' of register method should be of type 'List[str]' or 'str', got {type(op_type)}")
+            for type_name in type_list:
+                if (type_name not in cls._export_node_registry):
+                    cls._export_node_registry[type_name] = []
+                cls._export_node_registry[type_name].append((spec, operator))
+                aidge_core.register_MetaOperatorOp([cls._name, type_name], cls)
+                spec.attrs.add_attr("type", type_name) # MetaOperator specs need to verify the type
+            return Wrapper
+        return decorator
+
+
+    @classmethod
+    def register_generic(cls, op_type, spec):
+        """Decorator to register a GenericOperator with the export library.
+
+        Registers a GenericOperator under a given operator type and specification. This decorator
+        is intended for operator types that are grouped as meta operators.
+
+        :param op_type: Operator type(s) to register as a ``GenericOperator``.
+        :type op_type: Union[str, List[str]]
+        :param spec: Implementation specification for the GenericOperator.
+        :type spec: aidge_core.ImplSpec
+        :return: A wrapper class that initializes the registered GenericOperator.
+        :rtype: Callable
+        """
+        def decorator(operator):
+            class Wrapper(operator):
+                def __init__(self, *args, **kwargs):
+                    return operator(*args, **kwargs)
+            type_list = []
+            if isinstance(op_type, list):
+                type_list = op_type
+            elif isinstance(op_type, str):
+                type_list = [op_type]
+            else:
+                raise TypeError(f"Argument 'op_type' of register method should be of type 'List[str]' or 'str', got {type(op_type)}")
+            for type_name in type_list:
+                if (type_name not in cls._export_node_registry):
+                    cls._export_node_registry[type_name] = []
+                cls._export_node_registry[type_name].append((spec, operator))
+                aidge_core.register_GenericOperatorOp([cls._name, type_name], cls)
+                spec.attrs.add_attr("type", type_name) # GenericOperator specs need to verify the type
+            return Wrapper
+        return decorator
diff --git a/aidge_core/export_utils/generate_main.py b/aidge_core/export_utils/generate_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..57fc68bca489a69d7a2c5ec13b920e94f83ebcd6
--- /dev/null
+++ b/aidge_core/export_utils/generate_main.py
@@ -0,0 +1,131 @@
+import aidge_core
+from pathlib import Path
+from aidge_core.export_utils import generate_file, data_conversion
+
+def generate_main_cpp(export_folder: str, graph_view: aidge_core.GraphView, inputs_tensor=None) -> None:
+    """
+    Generate a C++ file to manage the forward pass of a model using the given graph structure.
+
+    This function extracts details from the :py:class:`aidge_core.graph_view` object, including input and output node names, data types,
+    and tensor sizes. It uses this data to populate a C++ file template (`main.jinja`), creating a file (`main.cpp`)
+    that call the `model_forward` function, which handles data flow and processing for the exported model.
+
+    This function also generate files containing input tensor if they have been set.
+
+    :param export_folder: Path to the folder where the generated C++ file (`main.cpp`) will be saved.
+    :type export_folder: str
+    :param graph_view: An instance of :py:class:`aidge_core.graph_view`, providing access to nodes and
+                       ordered input/output data within the computational graph.
+    :type graph_view: aidge_core.graph_view
+    :param inputs_tensor: **For future** argument to provide tensor to use in the main function, not implemented yet!
+    :type inputs_tensor: None
+    :raises RuntimeError: If there is an inconsistency in the output arguments (names, data types, sizes),
+                          indicating an internal bug in the graph representation.
+    """
+    outputs_name: list[str] = []
+    outputs_dtype: list[str] = []
+    outputs_size: list[int] = []
+    inputs_name: list[str] = []
+    gv_inputs: list[tuple[aidge_core.Node, int]] = graph_view.get_ordered_inputs()
+    gv_outputs: list[tuple[aidge_core.Node, int]] = graph_view.get_ordered_outputs()
+
+    for in_node, in_idx in gv_inputs:
+        in_node_input, in_node_input_idx = in_node.input(in_idx)
+        in_name = f"{in_node.name()}_input_{in_idx}" if in_node_input is None else f"{in_node_input.name()}_output_{in_node_input_idx}"
+        inputs_name.append(in_name)
+        input_tensor = in_node.get_operator().get_input(in_idx)
+        if input_tensor is None or input_tensor.undefined() or not input_tensor.has_impl():
+            if inputs_tensor is not None:
+                aidge_core.Log.notice("No support for inputs_tensor argument yet.")
+                aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
+            else:
+                aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
+        else:
+            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)
+
+    for out_node, out_id in gv_outputs:
+        outputs_name.append(f"{out_node.name()}_output_{out_id}")
+        out_tensor = out_node.get_operator().get_output(out_id)
+        outputs_dtype.append(data_conversion.aidge2c(out_tensor.dtype()))
+        outputs_size.append(out_tensor.size())
+
+    if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
+            raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
+
+    ROOT = Path(__file__).resolve().parents[0]
+    generate_file(
+        str(Path(export_folder) / "main.cpp"),
+        str(ROOT / "templates" / "main.jinja"),
+        func_name="model_forward",
+        inputs_name=inputs_name,
+        outputs_name=outputs_name,
+        outputs_dtype=outputs_dtype,
+        outputs_size=outputs_size
+    )
+
+
+def generate_main_compare_cpp(export_folder: str, graph_view: aidge_core.GraphView, inputs_tensor=None) -> None:
+    """
+    Generate a C++ file to manage the forward pass and compare the output of a model.
+
+    This function extracts details from the :py:class:`aidge_core.graph_view` object, including input and output node names, data types,
+    and tensor sizes. It uses this data to populate a C++ file template (`main.jinja`), creating a file (`main.cpp`)
+    that call the `model_forward` function, which handles data flow and processing for the exported model.
+
+    This function also generate files containing input tensor if they have been set.
+
+    :param export_folder: Path to the folder where the generated C++ file (`main.cpp`) will be saved.
+    :type export_folder: str
+    :param graph_view: An instance of :py:class:`aidge_core.graph_view`, providing access to nodes and
+                       ordered input/output data within the computational graph.
+    :type graph_view: aidge_core.graph_view
+    :param inputs_tensor: **For future** argument to provide tensor to use in the main function, not implemented yet!
+    :type inputs_tensor: None
+    :raises RuntimeError: If there is an inconsistency in the output arguments (names, data types, sizes),
+                          indicating an internal bug in the graph representation.
+    """
+    outputs_name: list[str] = []
+    outputs_dtype: list[str] = []
+    outputs_size: list[int] = []
+    inputs_name: list[str] = []
+    gv_inputs: list[tuple[aidge_core.Node, int]] = graph_view.get_ordered_inputs()
+    gv_outputs: list[tuple[aidge_core.Node, int]] = graph_view.get_ordered_outputs()
+
+    for in_node, in_idx in gv_inputs:
+        in_node_input, in_node_input_idx = in_node.input(in_idx)
+        in_name = f"{in_node.name()}_input_{in_idx}" if in_node_input is None else f"{in_node_input.name()}_output_{in_node_input_idx}"
+        inputs_name.append(in_name)
+        input_tensor = in_node.get_operator().get_input(in_idx)
+        if input_tensor is None or input_tensor.undefined() or not input_tensor.has_impl():
+            if inputs_tensor is not None:
+                aidge_core.Log.notice("No support for inputs_tensor argument yet.")
+                aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
+            else:
+                aidge_core.Log.notice(f"No input tensor set for {in_name}, main generated will not be functionnal after code generation.")
+        else:
+            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=in_name, tensor=input_tensor)
+
+    for out_node, out_id in gv_outputs:
+        out_name = f"{out_node.name()}_output_{out_id}"
+        outputs_name.append(out_name)
+        out_tensor = out_node.get_operator().get_output(out_id)
+        outputs_dtype.append(data_conversion.aidge2c(out_tensor.dtype()))
+        outputs_size.append(out_tensor.size())
+        if out_tensor is None or out_tensor.undefined() or not out_tensor.has_impl():
+                aidge_core.Log.notice(f"No input tensor set for {out_name}, main generated will not be functionnal after code generation.")
+        else:
+            aidge_core.export_utils.generate_input_file(export_folder=export_folder, array_name=out_name+"_expected", tensor=out_tensor)
+
+    if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
+            raise RuntimeError("FATAL: Output args list does not have the same length this is an internal bug.")
+
+    ROOT = Path(__file__).resolve().parents[0]
+    generate_file(
+        str(Path(export_folder) / "main.cpp"),
+        str(ROOT / "templates" / "main_compare.jinja"),
+        func_name="model_forward",
+        inputs_name=inputs_name,
+        outputs_name=outputs_name,
+        outputs_dtype=outputs_dtype,
+        outputs_size=outputs_size
+    )
diff --git a/aidge_core/export_utils/node_export.py b/aidge_core/export_utils/node_export.py
index 7aceaa0ccc1f07674241d6f35bbeff90330f2596..5777814a0b10c49d0f75245bdc4e9681027bdfb8 100644
--- a/aidge_core/export_utils/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -1,61 +1,430 @@
-from aidge_core import Node, Attributes
+import aidge_core
+from pathlib import Path
 
+from aidge_core.export_utils import data_conversion, code_generation
 from abc import ABC, abstractmethod
+from typing import List
+
+
+def get_chan(tensor: aidge_core.Tensor) -> int:
+    dformat = tensor.dformat()
+    dims = tensor.dims()
+    if dformat == aidge_core.dformat.default:
+        if len(dims) == 4:  # Suppose NCHW
+            return dims[1]
+        elif len(dims) == 2:  # Suppose NC
+            return dims[1]
+        else:
+            return None
+    elif dformat == aidge_core.dformat.nchw:
+        return dims[1]
+    elif dformat == aidge_core.dformat.nhwc:
+        return dims[3]
+    elif dformat == aidge_core.dformat.chwn:
+        return dims[0]
+    elif dformat == aidge_core.dformat.ncdhw:
+        return dims[1]
+    elif dformat == aidge_core.dformat.ndhwc:
+        return dims[4]
+    elif dformat == aidge_core.dformat.cdhwn:
+        return dims[0]
+    else:
+        raise RuntimeError(f"Unknown dataformat: {dformat}")
+
+
+def get_height(tensor: aidge_core.Tensor) -> int:
+    dformat = tensor.dformat()
+    dims = tensor.dims()
+    if dformat == aidge_core.dformat.default:
+        if len(dims) == 4:  # Suppose NCHW
+            return dims[2]
+        elif len(dims) == 2:  # Suppose NC
+            return 1
+        else:
+            return None
+    elif dformat == aidge_core.dformat.nchw:
+        return dims[2]
+    elif dformat == aidge_core.dformat.nhwc:
+        return dims[1]
+    elif dformat == aidge_core.dformat.chwn:
+        return dims[1]
+    elif dformat == aidge_core.dformat.ncdhw:
+        return dims[3]
+    elif dformat == aidge_core.dformat.ndhwc:
+        return dims[2]
+    elif dformat == aidge_core.dformat.cdhwn:
+        return dims[2]
+    else:
+        raise RuntimeError(f"Unknown dataformat: {dformat}")
+
+
+def get_width(tensor: aidge_core.Tensor) -> int:
+    dformat = tensor.dformat()
+    dims = tensor.dims()
+    if dformat == aidge_core.dformat.default:
+        if len(dims) == 4:  # Suppose NCHW
+            return dims[3]
+        elif len(dims) == 2:  # Suppose NC
+            return 1
+        else:
+            return None
+    elif dformat == aidge_core.dformat.nchw:
+        return dims[3]
+    elif dformat == aidge_core.dformat.nhwc:
+        return dims[2]
+    elif dformat == aidge_core.dformat.chwn:
+        return dims[2]
+    elif dformat == aidge_core.dformat.ncdhw:
+        return dims[4]
+    elif dformat == aidge_core.dformat.ndhwc:
+        return dims[3]
+    elif dformat == aidge_core.dformat.cdhwn:
+        return dims[3]
+    else:
+        raise RuntimeError(f"Unknown dataformat: {dformat}")
 
 
 class ExportNode(ABC):
     """Abstract class to interface node with export generation.
-    """
 
-    @abstractmethod
-    def __init__(self, aidge_node: Node) -> None:
-        """Create ExportNode and retieve attirubtes from ``aidge_node``:
+    This class exposes a dictionary, ``attributes``, which contains all the information
+    required to generate an export for a given node, including input/output names,
+    dimensions, types, and optional memory information.
+
+    - All the attributes of the Aidge Operator are automatically fetch, the key to get an attribute is the attribute name in python format, example ``no_bias``
+
+    - **node** (aidge_core.Node): The Aidge Node instance associated with this ExportNode.
+
+    - **name** (str): Name of the node, typically set via the Aidge node.
+
+    - **nb_in** (int): Number of input connections for the node.
+
+    - **nb_out** (int): Number of output connections for the node.
+
+    - **in_name** (list[str]): Unique name for each input connection.
+
+      Format:
+        - If no input node: ``{node_name}_input_{in_id}``
+        - If there is a parent node: ``{parent_name}_output_{out_id}``
+
+    - **in_dims** (list[list[int]]): Dimensions of each input.
+
+    - **in_node** (list[aidge_core.Node]): List of associated input nodes.
+
+    - **in_size** (list[int]): Size of each input.
+
+    - **in_chan** (list[int]): Channels in each input, based on data format.
+
+    - **in_height** (list[int]): Height of each input, based on data format.
+
+    - **in_width** (list[int]): Width of each input, based on data format.
+
+    - **in_dtype** (list[:py:class:`aidge_core.dtype`]): Data type for each input (Aidge format).
+
+    - **in_cdtype** (list[str]): Data type for each input (C/C++ format).
+
+    - **out_name** (list[str]): Unique name for each output, formatted as ``{name}_output_{out_id}``.
+
+    - **out_node** (list[list[aidge_core.Node]]): Associated output nodes for each output.
+
+    - **out_dims** (list[list[int]]): Dimensions of each output.
+
+    - **out_size** (list[int]): Size of each output.
+
+    - **out_chan** (list[int]): Channels in each output, based on data format.
+
+    - **out_height** (list[int]): Height of each output, based on data format.
+
+    - **out_width** (list[int]): Width of each output, based on data format.
+
+    - **out_dtype** (list[:py:class:`aidge_core.dtype`]): Data type for each output (Aidge format).
+
+    - **out_cdtype** (list[str]): Data type for each output (C/C++ format).
+
+    - **mem_info** (bool): True if `mem_info` is available for this node.
+
+    - **mem_info_size** (list[int]): Memory size for each output, if applicable.
+
+    - **mem_info_offset** (list[int]): Offset to access each output, if applicable.
+
+    - **mem_info_stride** (list[int]): Stride for accessing each output.
 
-        - name: aidge Node name
-        - attributes: dictionnary of attributes of the aidge Operator linked to the node, attributes name follow aidge naming convention
-        - parameters: List of parameters node, order in the list is the same as the one defined by the aidge operator
+    - **mem_info_length** (list[int]): Length of each output.
 
+    - **mem_info_cont_size** (list[int]): Continuous size for each output.
+
+    - **mem_info_cont_offset** (list[int]): Continuous offset for each output.
+
+    - **mem_info_wrap_offset** (list[int]): Wrap offset for each output.
+
+    - **mem_info_wrap_size** (list[int]): Wrap size for each output.
+
+    """
+
+    @abstractmethod
+    def __init__(self, aidge_node: aidge_core.Node, mem_info: List[dict]=None) -> None:
+        """Create ExportNode and retrieve attributes from ``aidge_node``:
         """
+
         super().__init__()
         self.node = aidge_node
         self.operator = aidge_node.get_operator()
-        self.name = self.node.name()
-        self.attributes = self.operator.attr.dict() if self.operator.attr is not None else {} # Attributes are auto fetched from aidge operators
-
-        # rename is_leaf ?
-        self.is_last = len(self.node.get_children()) == 0
-
+        # Attributes are auto fetched from aidge operators
+        self.attributes = {}  if isinstance(self.operator, aidge_core.MetaOperatorOp) or self.operator.attr is None else self.operator.attr.dict()
+        self.attributes["node"] = self.node
+        self.attributes["name"] = self.node.name()
+        self.attributes["nb_in"] = self.node.get_nb_inputs()
+        self.attributes["nb_out"] = self.node.get_nb_outputs()
 
+        # List of input nodes
         self.inputs = []
+        # List of output nodes
         self.outputs = []
-        self.inputs_dims = []
-        self.outputs_dims = []
 
-        for idx, parent_node in enumerate(self.node.get_parents()):
+        self.attributes["in_name"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_node"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_dims"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_size"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_dformat"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_format"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_dtype"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_cdtype"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_chan"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_height"] = [None] * self.attributes["nb_in"]
+        self.attributes["in_width"] = [None] * self.attributes["nb_in"]
+
+        self.attributes["out_name"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_nodes"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_dims"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_size"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_dformat"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_format"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_dtype"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_cdtype"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_chan"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_height"] = [None] * self.attributes["nb_out"]
+        self.attributes["out_width"] = [None] * self.attributes["nb_out"]
+
+        # Producer don't have mem_info
+        # TODO: document this attribute
+        # true if node have mem_info else false
+        self.attributes["mem_info"] = mem_info is not None and self.node.type() != "Producer"
+        if self.attributes["mem_info"]:
+            self.attributes["mem_info_size"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_offset"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_stride"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_length"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_cont_size"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_cont_offset"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_wrap_offset"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_wrap_size"] = [None] * self.attributes["nb_out"]
+
+        for idx, parent_node_in_id in enumerate(self.node.inputs()):
+            parent_node, out_id = parent_node_in_id
             self.inputs.append(parent_node)
-            if parent_node is not None:
-                self.inputs_dims.append(self.operator.get_input(idx).dims())
+            if self.operator.get_input(idx) is not None:
+                tensor = self.operator.get_input(idx)
+                self.attributes["in_name"][idx] = f"{self.attributes['name']}_input_{idx}" if parent_node is None else f"{parent_node.name()}_output_{out_id}"
+                self.attributes["in_node"][idx] = parent_node
+                self.attributes["in_dims"][idx] = tensor.dims()
+                self.attributes["in_size"][idx] = tensor.size()
+                self.attributes["in_dformat"][idx] = tensor.dformat()
+                self.attributes["in_format"][idx] = aidge_core.format_as(tensor.dformat())
+                self.attributes["in_dtype"][idx] = tensor.dtype()
+                self.attributes["in_cdtype"][idx] = data_conversion.aidge2c(
+                    tensor.dtype())
+                self.attributes["in_chan"][idx] = get_chan(tensor)
+                self.attributes["in_height"][idx] = get_height(tensor)
+                self.attributes["in_width"][idx] = get_width(tensor)
+            elif self.operator.input_category(idx) == aidge_core.InputCategory.OptionalParam or \
+                self.operator.input_category(idx) == aidge_core.InputCategory.OptionalData:
+                pass
+            else:
+                raise RuntimeError(f"No input for {self.node.name()} at input {idx}, did you forget to forward dims?")
+        for idx, list_child_node_in_id in enumerate(self.node.outputs()):
+            out_nodes = [node_in_id[0]
+                             for node_in_id in list_child_node_in_id]
+            self.outputs += out_nodes
+            if self.operator.get_output(idx) is not None:
+                tensor = self.operator.get_output(idx)
+                self.attributes["out_name"][idx] = f"{self.attributes['name']}_output_{idx}"
+                self.attributes["out_nodes"][idx] = out_nodes
+                self.attributes["out_dims"][idx] = tensor.dims()
+                self.attributes["out_size"][idx] = tensor.size()
+                self.attributes["out_dformat"][idx] = tensor.dformat()
+                self.attributes["out_format"][idx] = aidge_core.format_as(tensor.dformat())
+                self.attributes["out_dtype"][idx] = tensor.dtype()
+                self.attributes["out_cdtype"][idx] = data_conversion.aidge2c(
+                    tensor.dtype())
+                self.attributes["out_chan"][idx] = get_chan(tensor)
+                self.attributes["out_height"][idx] = get_height(tensor)
+                self.attributes["out_width"][idx] = get_width(tensor)
+                # Output mem_info
+                # TODO: add to docstring
+                if self.attributes["mem_info"]:
+                    if "size" in mem_info[idx]:
+                        self.attributes["mem_info_size"][idx] = mem_info[idx]["size"]
+                    else:
+                        raise RuntimeError("Size is mandatory")
+                    if "offset" in mem_info[idx]:
+                        self.attributes["mem_info_offset"][idx] = mem_info[idx]["offset"]
+                    else:
+                        raise RuntimeError("Offset is mandatory")
+                    if "stride" in mem_info[idx]:
+                        self.attributes["mem_info_stride"][idx] = mem_info[idx]["stride"]
+                    else:
+                        self.attributes["mem_info_stride"][idx] = mem_info[idx]["size"]
+                    if "length" in mem_info[idx]:
+                        self.attributes["mem_info_length"][idx] = mem_info[idx]["length"]
+                    else:
+                        self.attributes["mem_info_length"][idx] = tensor.size()
+                    if "cont_size" in mem_info[idx]:
+                        self.attributes["mem_info_cont_size"][idx] = mem_info[idx]["cont_size"]
+                    else:
+                        self.attributes["mem_info_cont_size"][idx] = mem_info[idx]["size"]
+                    if "cont_offset" in mem_info[idx]:
+                        self.attributes["mem_info_cont_offset"][idx] = mem_info[idx]["cont_offset"]
+                    else:
+                        self.attributes["mem_info_cont_offset"][idx] = mem_info[idx]["offset"]
+                    if "cont_offset" in mem_info[idx]:
+                        self.attributes["mem_info_wrap_offset"][idx] = mem_info[idx]["wrap_offset"]
+                    else:
+                        self.attributes["mem_info_wrap_offset"][idx] = 0
+                    if "wrap_size" in mem_info[idx]:
+                        self.attributes["mem_info_wrap_size"][idx] = mem_info[idx]["wrap_size"]
+                    else:
+                        self.attributes["mem_info_wrap_size"][idx] = 0
             else:
-                if self.operator.get_input(idx) is not None:
-                    self.inputs_dims.append(self.operator.get_input(idx).dims())
-                else:
-                    self.inputs_dims.append(None)
+                raise RuntimeError(f"No output for {self.node.name()}")
 
-        for idx, child_node in enumerate(self.node.get_children()):
-            self.outputs.append(child_node)
+    # **Class Attributes:**
 
-        # Dirty hot fix, change it quickly
-        self.outputs_dims.append(self.operator.get_output(0).dims())
+    # - **config_template** (str): Path to the template defining how to export the node definition.
+    #   This template is required for exporting; if undefined, raises an error, if no config
+    #   template is required set this to an empty string.
 
-    @abstractmethod
-    def export(self, export_folder:str, list_configs:list):
-        """Define how to export the node definition.
-        """
-        pass
+    # - **forward_template** (str): Path to the template for generating code to perform a forward pass
+    #   of the node. Required for exporting the forward pass; raises an error if undefined.
 
-    @abstractmethod
-    def forward(self, list_actions:list):
-        """Define how to generate code to perform a forward pass.
+    # - **include_list** (list[str]): List of include paths (e.g., "include/toto.hpp") to be added to
+    #   the generated export files. Must be defined before export; raises an error if undefined.
+
+    # - **kernels_to_copy** (list[str]): List of paths to kernel files that should be copied during
+    #   export. The kernels are copied to ``kernels_path``, and are automatically
+    #   added to the include list.
+
+    # - **kernels_path** (str): Path where all kernels are stored in the export, prefixed by the
+    #   `export_root`. Defaults to "include/kernels".
+
+    # - **config_path** (str): Path of the configuration folder where node definitions are exported.
+    #   Defaults to "include/layers".
+
+    # - **config_extension** (str): File extension for the configuration files, typically for header
+    #   files. Defaults to "h".
+
+class ExportNodeCpp(ExportNode):
+    """Class for exporting Aidge nodes with C++ code generation.
+
+    This subclass of :class:`ExportNode` defines specific templates,
+    configuration paths, and other attributes required to generate
+    C++ code for Aidge nodes, including header and source files
+    for node definitions and forward passes.
+
+    :var config_template: Path to the template defining how to export the node definition.
+        This template is required for exporting; if undefined, raises an error, if no config
+        template is required set this to an empty string.
+    :vartype config_template: str
+    :var forward_template: Path to the template for generating code to perform a forward pass
+        of the node. Required for exporting the forward pass; raises an error if undefined.
+    :vartype forward_template: str
+    :var include_list: List of include paths (e.g., "include/toto.hpp") to be added to
+        the generated export files. Must be defined before export; raises an error if undefined.
+    :vartype include_list: list[str]
+    :var kernels_to_copy: List of paths to kernel files that should be copied during
+        export. The kernels are copied to ``kernels_path``, and are automatically
+        added to the include list.
+    :vartype kernels_to_copy: list[str]
+    :var kernels_path: Path where all kernels are stored in the export, prefixed by the
+        `export_root`. Defaults to "include/kernels".
+    :vartype kernels_path: str
+    :var config_path: Path of the configuration folder where node definitions are exported.
+        Defaults to "include/layers".
+    :vartype config_path: str
+    :var config_extension: File extension for the configuration files, typically for header
+        files. Defaults to "h".
+    :vartype config_extension: str
+    """
+
+    # Path to the template defining how to export the node definition
+    config_template: str = None
+    # Path to the template defining how to export the node definition
+    forward_template: str = None
+    # List of includes to add example "include/toto.hpp"
+    include_list: list = None
+    # A list of path of kernels to copy in the export
+    # kernels are copied in str(export_folder / "include" / "kernels")
+    # They are automatically added to the include list.
+    kernels_to_copy: list = None
+    # Path where all the kernels are stored in the export (prefixed by export_root)
+    kernels_path: str = "include/kernels"
+    # Path of config folders
+    config_path: str = "include/layers"
+    # Config_folder_extension
+    config_extension: str = "h"
+
+
+    def export(self, export_folder: str):
+        """Defines how to export the node definition.
+
+        This method checks that `config_template`, `include_list`, and
+        `kernels_to_copy` are defined, then copies each kernel to the export folder,
+        appends their paths to the include list, and generates the configuration file
+        based on the `config_template`.
+
+        :param export_folder: Folder path where the files are exported.
+        :type export_folder: str
+        :return: List of include paths with the paths for kernels and configuration files.
+        :rtype: list[str]
         """
-        pass
+        if self.config_template is None:
+            raise ValueError("config_template have not been defined")
+        if self.include_list is None:
+            raise ValueError("include_list have not been defined")
+        if self.kernels_to_copy is None:
+            raise ValueError("kernels_to_copy have not been defined")
+
+        kernel_include_list = []
+        for kernel in self.kernels_to_copy:
+            kernel_path = Path(kernel)
+            code_generation.copy_file(
+                kernel_path,
+                str(export_folder / self.kernels_path)
+            )
+            kernel_include_list.append(
+                self.kernels_path + "/" + kernel_path.stem + kernel_path.suffix)
 
+        if self.config_template != "":
+            path_to_definition = f"{self.config_path}/{self.attributes['name']}.{self.config_extension}"
+
+            try:
+                code_generation.generate_file(
+                    str(export_folder / path_to_definition),
+                    self.config_template,
+                    **self.attributes
+                )
+            except Exception as e:
+                raise RuntimeError(f"Error when creating config file for {self.node.name()}[{self.node.type()}].") from e
+            kernel_include_list.append(path_to_definition)
+
+        return self.include_list + kernel_include_list
+
+    def forward(self):
+        """Generates code for a forward pass using the `forward_template`.
+        """
+        if self.forward_template is None:
+            raise ValueError("forward_template have not been defined")
+        forward_call: str = code_generation.generate_str(
+            self.forward_template,
+            **self.attributes
+        )
+        return [forward_call]
diff --git a/aidge_core/export_utils/scheduler_export.py b/aidge_core/export_utils/scheduler_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..90862578f04ecd0bdf5ee0bfd7643e7bc3d0a455
--- /dev/null
+++ b/aidge_core/export_utils/scheduler_export.py
@@ -0,0 +1,208 @@
+import aidge_core
+import os
+import shutil
+from pathlib import Path
+from aidge_core.export_utils import ExportLib, generate_file, copy_file
+from typing import List, Tuple
+
+
+def scheduler_export(scheduler, export_folder_path: str, export_lib: ExportLib = None, memory_manager=None, memory_manager_args=None, test_mode=False) -> None:
+    """Exports an aidge_core.Scheduler to C++ code.
+
+    This function generates files for a given computation graph, including forward-pass functions,
+    configuration headers, and the main API entry point for the exported model. It requires a
+    memory manager to allocate resources, and optionally an `ExportLib` instance to handle backend
+    configurations for node operators.
+
+
+    1. **Export Preparation**:
+        - Initializes export and DNN folders, checking that required memory management functions are defined.
+        - Retrieves peak memory usage and memory details for each node using the `memory_manager`.
+
+    2. **Configuration Generation**:
+        - Iterates over nodes scheduled by `scheduler`, configuring backends if `export_lib` is specified.
+        - Exports configuration headers and forward-pass actions for each node by invoking `op.export()`
+            and `op.forward()`, appending these to `list_configs` and `list_actions`, respectively.
+        - Collects information on input and output nodes, including their names, data types, and sizes.
+
+    3. **Code Generation**:
+        - Defines the forward-pass function, `model_forward`, with inputs and outputs based on node attributes.
+        - Generates the following files:
+
+            - **forward.cpp**: Implements the model forward pass using templates, applying configurations
+            and actions for each node.
+
+            - **forward.hpp**: Exports the forward API, defining inputs and outputs.
+
+            - **main.cpp**: Main entry file, serving as the model's forward-pass interface.
+
+    4. **Static File Export (Optional)**:
+        - If `export_lib` is specified, static files are copied to the export folder based on `export_lib`
+        specifications.
+
+
+    :param scheduler: Scheduler instance managing the computation graph.
+                    Uses `graph_view` and `get_static_scheduling` methods
+                    to retrieve the computation graph layout and ordered nodes.
+    :type scheduler: aidge_core.Scheduler
+    :param export_folder_path: Path to the folder where the generated export files will be saved.
+                            Creates this folder, along with subdirectories for model and source files.
+    :type export_folder_path: str
+    :param export_lib: Library providing the backend implementation for node operators.
+                    Defaults to None. If provided, each node's backend is set to the library's name.
+    :type export_lib: ExportLib, optional
+    :param memory_manager: Required function for managing memory allocation. It should take
+                        `scheduler` and optional `memory_manager_args` as parameters, returning
+                        `peak_mem` (peak memory usage) and `mem_info` (memory details for each node).
+    :type memory_manager: callable
+    :param memory_manager_args: Additional arguments passed to `memory_manager`. Defaults to an empty dictionary.
+    :type memory_manager_args: dict, optional
+    :param test_mode: Additional argument which may be used during forward generation.
+    :type test_mode: bool, optional
+    """
+    graphview = scheduler.graph_view()
+    export_folder = Path().absolute() / export_folder_path
+
+    os.makedirs(str(export_folder), exist_ok=True)
+
+    dnn_folder = export_folder / "dnn"
+    os.makedirs(str(dnn_folder), exist_ok=True)
+
+    if memory_manager_args is None:
+        memory_manager_args = {}
+
+    if memory_manager is None:
+        raise ValueError("A memory manager is required (no default value yet).")
+    peak_mem, mem_info = memory_manager(
+        scheduler, **memory_manager_args)
+
+    # List of function call for forward.cpp
+    list_actions: List[str] = []
+    # List of headers for forward.cpp
+    list_configs: List[str] = []
+
+    inputs_name: List[str] = []
+    inputs_dtype: List[str] = []
+    outputs_name: List[str] = []
+    outputs_dtype: List[str] = []
+    outputs_size: List[int] = []
+
+    # List of aidge_core.Node ordered by scheduler
+    list_forward_nodes: List[aidge_core.Node] = scheduler.get_static_scheduling()
+
+    # If export_lib is defined, use it
+    # else parse component in platform
+    # if export_lib is None:
+    #     raise ValueError("Export need an ExportLib.")
+    for node in list_forward_nodes:
+        if export_lib is not None:
+            aidge_core.Log.debug(f"Setting backend {export_lib._name} to {node.name()}[{node.type()}].")
+            node.get_operator().set_backend(export_lib._name)
+
+        op_impl = node.get_operator().get_impl()
+        if op_impl is None:
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an implementation.")
+        if not isinstance(op_impl, ExportLib):
+            raise RuntimeError(f"Operator {node.name()}[{node.type()}] doesn't have an exportable backend ({op_impl}).")
+
+        is_input:bool  = node in graphview.get_input_nodes()
+        is_output:bool = node in graphview.get_output_nodes()
+
+        if is_input:
+            # GraphView.get_inputs_nodes() returns the nodes that have an Input set to None or not in the graph
+            # However, some inputs are Optional and thus the node may not be an input of the graph!
+            # So we need to check that all the inputs of the node are in the graph or are not optional
+            # This is what the following code block is checking.
+            for idx, node_in in enumerate(node.inputs()):
+                optional:bool = node.get_operator().input_category(idx) == aidge_core.InputCategory.OptionalData
+                # Note: node_in is a Tuple(Node, out_idx)
+                in_graph:bool = node_in[0] in graphview.get_nodes()
+                is_input &= (in_graph or not optional)
+
+        # Get operator current specs
+        required_specs = op_impl.get_required_spec()
+        # Get specs of the implementation that match current specs
+        specs = op_impl.get_best_match(required_specs)
+        # Retrieve said implementation
+        export_node = op_impl.get_export_node(specs)
+
+        if export_node is None:
+            raise RuntimeError(f"Could not find export node for {node.name()}[{node.type()}].")
+        # Instantiate ExportNode
+        op = export_node(node, mem_info[node])
+
+        # For configuration files
+        list_configs += op.export(dnn_folder)
+        # For forward file
+        list_actions += op.forward()
+        if is_input:
+            for idx, node_in in enumerate(node.inputs()):
+                if node_in[0] not in graphview.get_nodes():
+                    inputs_name.append(op.attributes["in_name"][idx])
+                    inputs_dtype.append(
+                        op.attributes["in_cdtype"][idx]
+                    )
+        if is_output:
+            for idx in range(len(node.outputs())):
+                outputs_name.append(op.attributes["out_name"][idx])
+                outputs_dtype.append(
+                    op.attributes["out_cdtype"][idx]
+                )
+                outputs_size.append(op.attributes["out_size"][idx])
+
+    func_name = "model_forward"
+    ROOT = Path(__file__).resolve().parents[0]
+
+    forward_template = str(ROOT / "templates" / "forward.jinja")
+    if export_lib is not None and export_lib.forward_template is not None:
+        forward_template = export_lib.forward_template
+
+    list_node_names = []
+    for node in list_forward_nodes:
+        if node.type() != "Producer":
+            list_node_names.append(node.name())
+
+    generate_file(
+        str(dnn_folder / "src" / "forward.cpp"),
+        forward_template,
+        func_name=func_name,
+        headers=set(list_configs),
+        actions=list_actions,
+        # Note: Graph may not have inputs, so we need to check with output
+        # In the future, we should remove this as it is not compatible
+        # with a mix precision approach.
+        mem_ctype=outputs_dtype[0],  # Legacy behavior ...
+        mem_section=export_lib.mem_section if export_lib is not None else None,
+        peak_mem=peak_mem,
+        inputs_name=inputs_name,
+        inputs_dtype=inputs_dtype,
+        outputs_name=outputs_name,
+        outputs_dtype=outputs_dtype,
+        test_mode=test_mode,
+        list_node_names=list_node_names
+    )
+
+    forward_header_template = str(ROOT / "templates" / "forward_header.jinja")
+    if export_lib is not None and export_lib.forward_header_template is not None:
+        forward_header_template = export_lib.forward_header_template
+
+    # Generate dnn API
+    generate_file(
+        str(dnn_folder / "include" / "forward.hpp"),
+        forward_header_template,
+        libraries=[],
+        func_name=func_name,
+        inputs_name=inputs_name,
+        inputs_dtype=inputs_dtype,
+        outputs_name=outputs_name,
+        outputs_dtype=outputs_dtype,
+        test_mode=test_mode
+    )
+
+    if len(outputs_name) != len(outputs_dtype) or len(outputs_name) != len(outputs_size):
+        raise RuntimeError("FATAL: Output args lists do not have the same length; this is an internal bug.")
+
+    if export_lib is not None:
+        # Copy all static files in the export
+        for source, destination in export_lib.static_files.items():
+            copy_file(source, str(export_folder / destination))
diff --git a/aidge_core/export_utils/templates/c_data.jinja b/aidge_core/export_utils/templates/c_data.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..3709379e5c5417233341837d853cc6f68872a194
--- /dev/null
+++ b/aidge_core/export_utils/templates/c_data.jinja
@@ -0,0 +1,6 @@
+{#- For libraries #}
+#include <stdint.h>
+ {# Design header of the array -#}
+static const {{ data_t }} {{ name }}[{{ dims |join("*") }}] __attribute__((section("nn_data"))) = {
+{{ values |join(", ") }}
+};
diff --git a/aidge_core/export_utils/templates/forward.jinja b/aidge_core/export_utils/templates/forward.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..fde4b2a1392c4ada353af06246951e26c6236df6
--- /dev/null
+++ b/aidge_core/export_utils/templates/forward.jinja
@@ -0,0 +1,41 @@
+
+#include <stdint.h>
+
+#ifdef SAVE_OUTPUTS
+#include <sys/types.h>
+#include <sys/stat.h>
+#endif
+
+#include "include/forward.hpp"
+
+// Layer & memory configurations
+{%- for header in headers %}
+#include "{{ header }}"
+{%- endfor %}
+
+// Memory block
+{%- if mem_section == None %}
+static {{mem_ctype}} mem[{{peak_mem}}];
+{%- else %}
+static {{mem_ctype}} mem[{{peak_mem}}] __attribute__((section("{{ mem_section }}")));
+{%- endif %}
+
+{# Forward function #}
+{#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
+void {{ func_name }} (
+    {%- for i in range(inputs_name | length) -%}
+    const {{ inputs_dtype[i] }}* {{ inputs_name[i] }},
+    {%- endfor -%}
+    {%- for o in range(outputs_name | length) -%}
+    {{ outputs_dtype[o] }}** {{ outputs_name[o] }}_ptr{% if not loop.last %}, {% endif %}
+    {%- endfor -%})
+{
+
+    {%- for action in actions %}
+    {{ action }}
+    {%- endfor %}
+
+    {%- for output_name in outputs_name %}
+    *{{ output_name }}_ptr = {{ output_name }};
+    {%- endfor %}
+}
diff --git a/aidge_core/export_utils/templates/forward_header.jinja b/aidge_core/export_utils/templates/forward_header.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..574f5323866786f3b6d6f98af53b8ccf3d9975b3
--- /dev/null
+++ b/aidge_core/export_utils/templates/forward_header.jinja
@@ -0,0 +1,25 @@
+#ifndef DNN_HPP
+#define DNN_HPP
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+{#- For libraries #}
+{% for lib in libraries %}
+#include <{{ lib }}>
+{%- endfor %}
+
+void {{ func_name }} (
+    {%- for i in range(inputs_name | length) -%}
+    const {{ inputs_dtype[i] }}* {{ inputs_name[i] }},
+    {%- endfor -%}
+    {%- for o in range(outputs_name | length) %}
+    {{ outputs_dtype[o] }}** {{ outputs_name[o] }}{% if not loop.last %}, {% endif %}
+    {%- endfor -%});
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* DNN_HPP */
diff --git a/aidge_core/export_utils/templates/main.jinja b/aidge_core/export_utils/templates/main.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..697a97b53ef05eedddf5ac66f262581475e655c6
--- /dev/null
+++ b/aidge_core/export_utils/templates/main.jinja
@@ -0,0 +1,40 @@
+
+#include <iostream>
+#include "forward.hpp"
+{% for name in inputs_name %}
+#include "{{ name }}.h"
+{% endfor %}
+
+{% set printf_formats = {
+    "double": "%lf",
+    "float": "%f",
+    "int8_t": "%hhd",
+    "int16_t": "%hd",
+    "int32_t": "%d",
+    "int64_t": "%lld",
+    "uint8_t": "%hhu",
+    "uint16_t": "%hu",
+    "uint32_t": "%u",
+    "uint64_t": "%llu"
+} %}
+
+int main()
+{
+    // Initialize the output arrays
+    {%- for o in range(outputs_name | length) %}
+    {{ outputs_dtype[o] }}* {{ outputs_name[o] }} = nullptr;
+    {% endfor %}
+
+    // Call the forward function
+    {{ func_name }}({{ inputs_name|join(", ") }}{% if inputs_name %}, {% endif %}&{{ outputs_name|join(", &") }});
+
+    // Print the results of each output
+    {%- for o in range(outputs_name | length) %}
+    printf("{{ outputs_name[o] }}:\n");
+    for (int o = 0; o < {{ outputs_size[o] }}; ++o) {
+        printf("{{ printf_formats[outputs_dtype[o]] }} ", {{ outputs_name[o] }}[o]);
+    }
+    printf("\n");
+    {% endfor %}
+    return 0;
+}
diff --git a/aidge_core/export_utils/templates/main_compare.jinja b/aidge_core/export_utils/templates/main_compare.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..3cc4c986db66ef0ded3b3f44ba10394b424f3dbd
--- /dev/null
+++ b/aidge_core/export_utils/templates/main_compare.jinja
@@ -0,0 +1,59 @@
+{% set printf_formats = {
+    "double": "%lf",
+    "float": "%f",
+    "int8_t": "%hhd",
+    "int16_t": "%hd",
+    "int32_t": "%d",
+    "int64_t": "%lld",
+    "uint8_t": "%hhu",
+    "uint16_t": "%hu",
+    "uint32_t": "%u",
+    "uint64_t": "%llu"
+} %}
+#include <cstdio>      // printf
+#include <cmath>       // std::abs
+#include "forward.hpp" // Exported forward
+
+// Inputs
+{% for name in inputs_name %}
+#include "{{ name }}.h"
+{% endfor %}
+
+// Outputs
+{% for name in outputs_name %}
+#include "{{ name }}_expected.h"
+{% endfor %}
+
+int main()
+{
+    // Initialize the output arrays
+    {%- for o in range(outputs_name | length) %}
+    {{ outputs_dtype[o] }}* {{ outputs_name[o] }} = nullptr;
+    {% endfor %}
+
+    const float abs_err = 0.001f;
+    const float rel_err = 0.0001f;
+
+    // Call the forward function
+    {{ func_name }}({{ inputs_name|join(", ") }}{% if inputs_name %}, {% endif %}&{{ outputs_name|join(", &") }});
+
+
+    int nb_identical;
+    int nb_out;
+
+    // Print the results of each output
+    {%- for o in range(outputs_name | length) %}
+    nb_identical = 0;
+    nb_out = 0;
+    printf("{{ outputs_name[o] }}:\n");
+    for (int o = 0; o < {{ outputs_size[o] }}; ++o) {
+        printf("Expected {{ printf_formats[outputs_dtype[o]] }} <-> Predicted {{ printf_formats[outputs_dtype[o]] }}\n", {{ outputs_name[o] }}_expected[o], {{ outputs_name[o] }}[o]);
+        if (std::abs({{ outputs_name[o] }}_expected[o] - {{ outputs_name[o] }}[o]) <= abs_err + rel_err * std::abs({{ outputs_name[o] }}_expected[o]))
+            nb_identical++;
+        nb_out++;
+    }
+    printf("\nNumber of equal outputs: %d / %d\n", nb_identical, nb_out);
+    {% endfor %}
+
+    return 0;
+}
diff --git a/aidge_core/export_utils/tensor_export.py b/aidge_core/export_utils/tensor_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..43f013dd02e730ae72c89a5bbe329c4fbb8a0324
--- /dev/null
+++ b/aidge_core/export_utils/tensor_export.py
@@ -0,0 +1,40 @@
+import os
+
+from aidge_core.export_utils.code_generation import generate_file
+from aidge_core.export_utils.data_conversion import aidge2c
+from aidge_core import Tensor
+from pathlib import Path
+
+def tensor_to_c(tensor:Tensor)->str:
+    """Given a :py:class:``aidge_core.Tensor``, return a C description of the tensor.
+    For example:
+    {
+        {1, 2},
+        {3, 4}
+    }
+
+    :param tensor: Tensor to transform to a string
+    :type tensor: Tensor
+    :return: String representation of a C array
+    :rtype: str
+    """
+    return str(tensor)
+
+def generate_input_file(export_folder:str,
+                        array_name:str,
+                        tensor:Tensor):
+
+    # If directory doesn't exist, create it
+    if not os.path.exists(export_folder):
+        os.makedirs(export_folder)
+    print(f"gen : {export_folder}/{array_name}.h")
+    ROOT = Path(__file__).resolve().parents[0]
+    generate_file(
+        file_path=f"{export_folder}/{array_name}.h",
+        template_path=str(ROOT / "templates" / "c_data.jinja"),
+        dims = tensor.dims(),
+        data_t = aidge2c(tensor.dtype()),
+        name = array_name,
+        values = list(tensor)
+    )
+
diff --git a/aidge_core/mem_info.py b/aidge_core/mem_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7ca85bbd73bd205850b19616e53fda210749a80
--- /dev/null
+++ b/aidge_core/mem_info.py
@@ -0,0 +1,155 @@
+import os
+import subprocess
+import shutil
+from pathlib import Path
+import aidge_core
+from typing import Tuple, List
+
+
+# Default memory management, which can be used for development
+def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, List]:
+    """Basic memory management: concatenates memory blocks, no memory reuse!
+
+    :param scheduler: Aidge scheduler
+    :type scheduler: :py:class:`aidge_core.Scheduler`
+    :return: The total memory size (in number of elements) and a dict mapping each node to a list (one entry per output) of dictionaries (size, offset)
+    :rtype: Tuple[int, dict]
+    """
+    mem_info = {}
+    mem_size = 0
+
+    # Exclude Producers and the last layers (because the results are stored outside the export)
+    for i, node in enumerate(scheduler.get_static_scheduling()):
+        if node.type() != "Producer":
+            node_mem_info = []
+            for out_id in range(node.get_nb_outputs()):
+                dims = node.get_operator().get_output(out_id).dims()
+                mem = 1
+                for dim in dims:
+                    mem *= dim
+
+                # Add memory info
+                node_mem_info.append({
+                    "size": mem,
+                    "offset": mem_size
+                })
+
+                # Increment offset for the next layer
+                mem_size += mem
+            mem_info[node] = node_mem_info
+        else:
+            mem_info[node] = [] # No meminfo for producer
+    return mem_size, mem_info
+
+
+def _gnuplot_installed():
+    try:
+        # Run gnuplot with the --version flag and capture the output
+        subprocess.run(["gnuplot", "--version"])
+        return True
+    except FileNotFoundError:
+        aidge_core.Log.warn("Gnuplot is not installed.")
+        return False
+    except subprocess.CalledProcessError:
+        aidge_core.Log.warn("Gnuplot command found but failed to run.")
+        return False
+
+def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path = None, wrapping: bool = False) -> Tuple[int, List[dict]]:
+    """Generates optimized memory information for a computation graph managed by a scheduler.
+
+    This function analyzes the memory usage of a computation graph, determining the memory peak
+    and detailed memory management information for each node in the scheduler. It supports optional
+    wrapping of memory buffers and logs memory usage statistics to facilitate memory optimization.
+
+    :param scheduler: Scheduler instance that organizes the computation graph. It manages the
+                      nodes and facilitates memory planning by invoking its `generate_memory` method.
+    :type scheduler: aidge_core.Scheduler
+    :param stats_folder: Directory path to store memory statistics and plots generated by `mem_manager`.
+                         If provided as a string, it is converted to a `Path` object, default=None.
+    :type stats_folder: Path, optional
+    :param wrapping: Boolean flag to enable or disable wrap-around buffer optimization.
+                     Defaults to `False`.
+    :type wrapping: bool, optional
+    :return: A tuple containing the peak memory size and a list of memory information for each
+             scheduled node. The memory information for each node includes details such as size,
+             offset, stride, length, count, and optional wrap-around details.
+    :rtype: Tuple[int, List[dict]]
+    """
+    # Computing the forward dims has to be done outside this function
+    # It is also assumed that the scheduling has already been generated
+    # Otherwise, uncomment the following line
+    # scheduler.generate_scheduling()
+    # Generate the memory manager
+    # So far, the Producers are not taken into consideration in the memory manager => inc_producers=False
+    mem_manager = scheduler.generate_memory(
+        inc_producers=False, wrap_around_buffer=wrapping)
+
+    # List of nodes which are connected at the input of the graph (None if input is not connected)
+    nodes_at_input = [n[0] for n in scheduler.graph_view().inputs()]
+
+    if stats_folder is not None:
+        if _gnuplot_installed():
+            # Use gnuplot to generate the log
+            os.makedirs(str(Path(stats_folder) / "graph"), exist_ok=True)
+            mem_manager.log("memory_info")
+            os.chmod("memory_info_plot.gnu", 0o777)
+            os.system("./memory_info_plot.gnu")
+            shutil.move("memory_info", str(Path(stats_folder) / "graph" / "memory_info"))
+            shutil.move("memory_info_plot.png", str(
+                Path(stats_folder) / "graph" / "memory_info_plot.png"))
+            os.remove("memory_info_plot.gnu")
+        else:
+            aidge_core.Log.warn("Warning: gnuplot is not installed, could not generate stat folder.")
+    # In the export, we currently use an unified memory buffer whose size
+    # is determined by the memory peak usage
+    mem_size = mem_manager.get_peak_usage()
+    mem_info = {}
+
+    mem_planes = mem_manager.get_planes()
+
+    for node in scheduler.get_static_scheduling():
+        node_mem_info = []
+        if node.type() == "Producer":
+            pass
+        elif node in nodes_at_input:
+            # Input memory management (suppose tensor ends with [:, channel, height, width]))
+            tensor = node.get_operator().get_output(0)
+            if tensor is None:
+                raise RuntimeError("Warning input producer not provided")
+            if len(tensor.dims()) < 3:
+                raise RuntimeError(
+                    f"Input producer dimensions must be with [:, channel, height, width] but got {tensor.dims()} instead")
+            # TODO : use get_chan get_height and get_width function !
+            node_mem_info.append({
+                    "size": tensor.dims()[-3],          # Should be nb_channels
+                    "offset": 0,                        # Suppose input data is stored outside the export function
+                                                        # so the memory offset is not important to consider
+                    "stride": tensor.dims()[-3],        # Should be nb_channels
+                    "length": tensor.dims()[-1],        # Should be width
+                    "count":  tensor.dims()[-2],        # Should be height
+                    "cont_offset": 0,                   # Suppose input data is stored outside the export function
+                                                        # so the memory offset is not important to consider
+                    "cont_size": tensor.dims()[-1] * \
+                                 tensor.dims()[-2] * \
+                                 tensor.dims()[-3],     # Size of input
+                    "wrap_offset": 0,                   # No wrapping
+                    "wrap_size": 0                      # No wrapping
+                })
+        else:
+            for out_id in range(node.get_nb_outputs()):
+                plane = mem_planes[node][out_id]
+                node_mem_info.append({
+                    "size": plane.size,
+                    "offset": plane.get_contiguous_offset(),
+                    "stride": plane.stride,
+                    "length": plane.length,
+                    "count": plane.count,
+                    "cont_offset": plane.get_contiguous_offset(),
+                    "cont_size": plane.get_contiguous_size(),
+                    "wrap_offset": plane.get_wrapped_offset(),
+                    "wrap_size": plane.get_wrapped_size()
+                })
+        mem_info[node] = node_mem_info
+    return mem_size, mem_info
+
+
diff --git a/aidge_core/show_graphview.py b/aidge_core/show_graphview.py
new file mode 100644
index 0000000000000000000000000000000000000000..14bb6c3e9a4c7be1e1aecee02a04a5dc42e0a5d4
--- /dev/null
+++ b/aidge_core/show_graphview.py
@@ -0,0 +1,233 @@
+import os
+import json
+import builtins
+import aidge_core
+import numpy as np
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+def _retrieve_operator_attrs(node : aidge_core.Node) -> Dict[str, Optional[Any]]:
+    """
+    Returns the dictionary containing the attributes of a given Node.
+
+    :param graph: A Node in the list of ordered nodes.
+    :type graph: aidge_core.Node
+
+    :return: A dictionary with the Node's attributes.
+    :rtype: Dict[str, Optional[Any]]
+    """
+
+    if node.get_operator().attr is not None:
+        node_attr_dict =  node.get_operator().attr.dict()
+        for key,value in node_attr_dict.items():
+            if not type(value).__name__ in dir(builtins):
+                node_attr_dict[key] = value.name
+
+    else:
+        node_attr_dict = {}
+
+    return node_attr_dict
+
+def _create_dict(ordered_nodes : List[aidge_core.Node], write_trainable_params_embed : bool, write_trainable_params_ext : bool, path_trainable_params : Path, params_file_format : str) -> Dict[str, Optional[Any]]:
+    """
+    Creates a dictionary to store the information of a given ordered GraphView.
+
+    :param ordered_nodes: A list with the GraphView's ordered nodes.
+    :type graph: list
+    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed).
+    :type write_trainable_params_embed: bool
+    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file.
+    :type write_trainable_params_ext: bool
+    :param path_trainable_params: Path of the external file used to store the Nodes' trainable parameters.
+    :type path_trainable_params: Path
+    :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``.
+    :type params_file_format: str
+
+    :return: A dictionary with the GraphView description.
+    :rtype: Dict[str, Optional[Any]]
+    """
+
+    graphview_dict = {'graph': []}
+
+    for node in ordered_nodes:
+
+        if node is not None:
+            node_dict = {'name' : node.name(),
+                         'optype' : node.get_operator().type(),
+                         'nb_inputs' : node.get_operator().nb_inputs(),
+                         'nb_outputs' : node.get_operator().nb_outputs()}
+
+            inputs = []
+            for input_idx in range(node.get_operator().nb_inputs()):
+                input_dict = {'dims' : node.get_operator().get_input(input_idx).dims(),
+                              'data_type' : str(node.get_operator().get_input(input_idx).dtype()),
+                              'data_format' : str(node.get_operator().get_input(input_idx).dformat())}
+                inputs.append(input_dict)
+
+            node_dict['inputs'] = inputs
+
+            outputs = []
+            for output_idx in range(node.get_operator().nb_outputs()):
+                output_dict = {'dims' : node.get_operator().get_output(output_idx).dims(),
+                               'data_type' : str(node.get_operator().get_output(output_idx).dtype()),
+                              'data_format' : str(node.get_operator().get_output(output_idx).dformat())}
+                outputs.append(output_dict)
+
+            node_dict['outputs'] = outputs
+
+            parents = node.get_parents()
+            if None in parents:
+                if parents[0] is None: parents.append(parents.pop(0))
+            else:
+                pass
+
+            parents_inputs = []
+            input_idx = 0
+            for parent in node.get_parents():
+                if parent is not None:
+                    for children in parent.outputs():
+                        for child in children:
+                            if child[0] == node and child[1] == input_idx:
+                                parents_inputs.append((parent.name(), input_idx))
+
+                elif parent is None:
+                    if input_idx not in [item[1] for item in parents_inputs]:
+                        parents_inputs.append((None, input_idx))
+
+                input_idx += 1
+            node_dict['parents'] = parents_inputs
+
+            children_outputs = []
+            output_idx = 0
+            for children in node.get_ordered_children():
+                for child in children:
+                    if child is not None:
+                        for parent in child.inputs():
+                            if parent[0] == node and parent[1] == output_idx:
+                                children_outputs.append((child.name(), output_idx))
+                output_idx += 1
+            node_dict['children'] = children_outputs
+
+            # Check if my node is a metaop
+            attributes_dict = {}
+            if isinstance(node.get_operator(), aidge_core.MetaOperatorOp):
+                attributes_dict['micro_graph'] = []
+                for micro_node in node.get_operator().get_micro_graph().get_nodes():
+                    micro_node_dict = {'name' : micro_node.name(),
+                                        'optype' : micro_node.type()}
+
+                    micro_node_attr_dict =  _retrieve_operator_attrs(micro_node)
+                    micro_node_dict['attributes'] = micro_node_attr_dict
+                    attributes_dict['micro_graph'].append(micro_node_dict)
+
+            else:
+                node_attr_dict = _retrieve_operator_attrs(node)
+                attributes_dict.update(node_attr_dict)
+
+            node_dict['attributes'] = attributes_dict
+
+            if node.type() == 'Producer':
+                if write_trainable_params_ext:
+
+                    params_file_format = params_file_format.casefold()
+
+                    if params_file_format=='npz':
+                        np.savez_compressed(Path(path_trainable_params, node.name()), **{node.name() : node.get_operator().get_output(0)})
+                        node_dict['tensor_data'] = str(Path(path_trainable_params, node.name() + '.npz'))
+
+                    elif params_file_format=='json':
+                        tensor = np.array(node.get_operator().get_output(0))
+                        tensor_dict = {
+                            node.name() :
+                            {
+                                'dims' : tensor.shape,
+                                'data_type' : str(tensor.dtype),
+                                'tensor_data' : tensor.tolist()
+                            }
+                        }
+
+                        with open(Path(path_trainable_params, node.name() + '.json'), 'w') as fp:
+                            json.dump(tensor_dict, fp, indent=4)
+
+                        node_dict['tensor_data'] = str(Path(path_trainable_params, node.name() + '.json'))
+
+                    else:
+                        raise Exception("File format to write trainable parameters not recognized.")
+
+
+                if write_trainable_params_embed:
+                    node_dict['tensor_data'] = np.array(node.get_operator().get_output(0)).tolist()
+
+                else:
+                    pass
+
+            graphview_dict['graph'].append(node_dict)
+
+        else: # node is None
+            pass
+
+    return graphview_dict
+
+def _write_dict_json(graphview_dict : Dict[str, Optional[Any]], json_path : str) -> None:
+    """
+    Writes dictionary containing GraphView description to a JSON file.
+
+    :param graphview_dict: A dictionary with the GraphView description.
+    :type graphview_dict: dict[str, int, float, bool, None]
+    :param json_path: Path to write JSON file.
+    :type json_path: str
+    """
+
+    with open(json_path, 'w') as fp:
+        json.dump(graphview_dict, fp, indent=4)
+
+    return None
+
+def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainable_params_embed : bool = False, write_trainable_params_ext : bool = False, params_file_format : str = 'json') -> None:
+    """
+    Generates the description for a GraphView in the JSON format.
+
+    :param graph: A GraphView of Aidge.
+    :type graph: aidge_core.GraphView
+    :param json_path: Path to write JSON file.
+    :type json_path: Path
+    :param write_trainable_params_embed: Whether or not to write the eventual trainable parameters of the Nodes in the same file as the dict (embed).
+    :type write_trainable_params_embed: bool, optional
+    :param write_trainable_params_ext: Whether or not to write the eventual trainable parameters of the Nodes in an external file.
+    :type write_trainable_params_ext: bool, optional
+    :param params_file_format: Format of the external file used to store the Nodes' trainable parameters. Options: ``npz`` or ``json``. Default : ``json``. Requires ``write_trainable_params_ext``.
+    :type params_file_format: str, optional
+    """
+
+    if not json_path.suffix:
+        if not json_path.is_dir():
+            json_path.mkdir(parents=True, exist_ok=True)
+        json_path = json_path.joinpath('model.json')
+
+    else:
+        if json_path.suffix != '.json':
+            raise Exception('If ``json_path`` contains a filename, it must be of JSON format.')
+        if not json_path.parent.is_dir():
+            json_path.parent.mkdir(parents=True, exist_ok=True)
+
+    if write_trainable_params_ext:
+        path_trainable_params = (json_path.parent).joinpath(json_path.stem +  '_trainable_params/')
+        path_trainable_params.mkdir(parents=True, exist_ok=True)
+
+    else:
+        path_trainable_params = Path()
+
+    if isinstance(gview, aidge_core.GraphView):
+        # Sort GraphView in topological order
+        ordered_nodes = gview.get_ordered_nodes()
+
+        # Create dict from GraphView
+        graphview_dict = _create_dict(ordered_nodes, write_trainable_params_embed, write_trainable_params_ext, path_trainable_params, params_file_format)
+
+        # Write dict to JSON
+        _write_dict_json(graphview_dict, json_path)
+
+    else:
+        raise Exception("Graph must be an instance of aidge_core.GraphView.")
+
+    return None
\ No newline at end of file
diff --git a/aidge_core/simplify_graph.py b/aidge_core/simplify_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..30ee04e6c62c1dfd465d425d1b430d2b5813383b
--- /dev/null
+++ b/aidge_core/simplify_graph.py
@@ -0,0 +1,56 @@
+import numpy as np
+import aidge_core
+
def simplify_graph(graph: aidge_core.GraphView):
    """
    Simplify a graph loaded from ONNX.

    Fuses common PyTorch-ONNX operator patterns into single meta-operators
    (Linear, LayerNorm, ScaledDotProductAttention, MultiHeadAttention, GeLU)
    using the single-pass graph-matching DSL. Fusions are applied in order,
    so a meta-operator created by an earlier fusion (e.g.
    ScaledDotProductAttention) can be referenced by a later pattern
    (e.g. MultiHeadAttention).

    :param graph: The GraphView to simplify.
    :type graph: aidge_core.GraphView
    """

    def check_constant_producer(value):
        # Build a node predicate matching a Producer whose single output
        # element is (approximately, via np.isclose) equal to `value`.
        def _check_constant_producer(node):
            out = node.get_operator().get_output(0)
            return (len(out) == 1 and np.isclose(out[0], value))
        return _check_constant_producer

    gm = aidge_core.SinglePassGraphMatching(graph)
    # Named node lambdas used by the GeLU pattern below to recognize the
    # constants sqrt(2), 1 and 0.5 emitted by the ONNX export.
    gm.add_node_lambda("Constant_sqrt2", check_constant_producer(np.sqrt(2)))
    gm.add_node_lambda("Constant_1", check_constant_producer(1))
    gm.add_node_lambda("Constant_0_5", check_constant_producer(0.5))

    # Linear [from PyTorch ONNX]
    aidge_core.fuse_to_metaops(gm, "MatMul-*>Add", "Linear")

    # LayerNorm [from PyTorch ONNX]
    aidge_core.fuse_to_metaops(gm, "ReduceMean-*>Sub#1~>(Pow#1->ReduceMean-*>Add#1->Sqrt)-*>Div#1-*>Mul#1-*>Add#2;"
                                   "Sub#1~*>Div#1;"
                                   "Pow#1<1~Producer;"
                                   "Add#1<*~Producer;"
                                   "Mul#1<*~Producer;"
                                   "Add#2<*~Producer;"
                                   "Sub#1~>$", "LayerNorm")

    # ScaledDotProductAttention [from PyTorch ONNX]
    aidge_core.fuse_to_metaops(gm, "MatMul->Div#1->Softmax-*>MatMul;"
                                   "Div#1<1~Producer", "ScaledDotProductAttention")

    # MultiHeadAttention [from PyTorch ONNX]
    # Note: matches the ScaledDotProductAttention meta-operator fused above.
    aidge_core.fuse_to_metaops(gm, "ScaledDotProductAttention#1->Transpose->Reshape#1->Linear;"
                                   "Reshape#1<1~Producer;"
                                   "ScaledDotProductAttention#1<0-(Transpose<-Reshape#2<-Add#1);"
                                   "ScaledDotProductAttention#1<1-(Transpose<-Reshape#3<-Add#2);"
                                   "ScaledDotProductAttention#1<2-(Transpose<-Reshape#4<-Add#3);"
                                   "Reshape#2<1~Producer;"
                                   "Add#1<*-0-Split#1;"
                                   "Add#2<*-1-Split#1;"
                                   "Add#3<*-2-Split#1;"
                                   "Split#1<-MatMul;"
                                   "Split#1<1~Producer", "MultiHeadAttention")

    # GeLU [from PyTorch ONNX]
    # Uses the constant-matching node lambdas registered above.
    aidge_core.fuse_to_metaops(gm, "Div#1->Erf->Add#1-*>Mul->Mul#2;"
                                   "Div#1<1~Producer[Constant_sqrt2];"
                                   "Add#1<*~Producer[Constant_1];"
                                   "Mul#2<*~Producer[Constant_0_5]", "GeLU")
diff --git a/aidge_core/static_analysis.py b/aidge_core/static_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..c65a102a10601605cd2ca988a2ad3cf2cbd00e6e
--- /dev/null
+++ b/aidge_core/static_analysis.py
@@ -0,0 +1,195 @@
+import matplotlib
+import matplotlib.pyplot as plt
+from functools import partial
+import numpy as np
+import aidge_core
+
class StaticAnalysisExt(aidge_core.StaticAnalysis):
    # Python-side extension of aidge_core.StaticAnalysis adding
    # matplotlib-based bar-chart logging of per-node statistics.

    def log_nb_params(self, filename, title=None, log_scale=False):
        """
        Collect the number of parameters of each node (Producers excluded)
        and optionally plot them as a bar chart.

        :param filename: output image path, or None to skip plotting.
        :param title: plot title; defaults to "log_nb_params".
        :param log_scale: if True, use a logarithmic y-axis.
        :return: list of [node_name, nb_params] pairs, in topological order.
        """
        # Unique display name per node, built from the "{0} ({1}#{3})" format.
        namePtrTable = self.get_graph().get_ranked_nodes_name("{0} ({1}#{3})");
        nodes = self.get_graph().get_ordered_nodes()
        series = []
        legend = None

        for node in nodes:
            # Producers hold the parameters themselves; only chart
            # computational nodes.
            if node.type() == "Producer":
                continue

            name = namePtrTable[node]
            series.append([name, self.get_nb_params(node)])

        if title is None: title = "log_nb_params"
        if filename is not None:
            self._log_bar(series, filename, title, legend, log_scale)
        return series
+
+    def log_params_size(self, filename, title=None, log_scale=False):
+        namePtrTable = self.get_graph().get_ranked_nodes_name("{0} ({1}#{3})");
+        nodes = self.get_graph().get_ordered_nodes()
+        series = []
+        legend = None
+
+        for node in nodes:
+            if node.type() == "Producer":
+                continue
+
+            name = namePtrTable[node]
+            series.append([name, self.log_params_size(node)])
+
+        if title is None: title = "log_params_size"
+        if filename is not None:
+            self._log_bar(series, filename, title, legend, log_scale)
+        return series
+
    # Thin wrappers: each logs a single OperatorStats metric — or, for
    # log_nb_ops_by_type, a stacked set of metrics — through _log_callback.

    def log_nb_arithm_ops(self, filename, title=None, log_scale=False):
        return self._log_callback(aidge_core.OperatorStats.get_nb_arithm_ops, filename, title, log_scale)

    def log_nb_logic_ops(self, filename, title=None, log_scale=False):
        return self._log_callback(aidge_core.OperatorStats.get_nb_logic_ops, filename, title, log_scale)

    def log_nb_comp_ops(self, filename, title=None, log_scale=False):
        return self._log_callback(aidge_core.OperatorStats.get_nb_comp_ops, filename, title, log_scale)

    def log_nb_nl_ops(self, filename, title=None, log_scale=False):
        return self._log_callback(aidge_core.OperatorStats.get_nb_nl_ops, filename, title, log_scale)

    def log_nb_mac_ops(self, filename, title=None, log_scale=False):
        return self._log_callback(aidge_core.OperatorStats.get_nb_mac_ops, filename, title, log_scale)

    def log_nb_ops(self, filename, title=None, log_scale=False):
        return self._log_callback(aidge_core.OperatorStats.get_nb_ops, filename, title, log_scale)

    def log_nb_arithm_int_ops(self, filename, title=None, log_scale=False):
        return self._log_callback(aidge_core.OperatorStats.get_nb_arithm_int_ops, filename, title, log_scale)

    def log_nb_arithm_fp_ops(self, filename, title=None, log_scale=False):
        return self._log_callback(aidge_core.OperatorStats.get_nb_arithm_fp_ops, filename, title, log_scale)

    def log_nb_ops_by_type(self, filename, title=None, log_scale=False):
        # Passing a list makes _log_callback produce one stacked bar chart
        # with a legend entry per metric.
        return self._log_callback([aidge_core.OperatorStats.get_nb_arithm_int_ops,
                                  aidge_core.OperatorStats.get_nb_arithm_fp_ops,
                                  aidge_core.OperatorStats.get_nb_logic_ops,
                                  aidge_core.OperatorStats.get_nb_comp_ops,
                                  aidge_core.OperatorStats.get_nb_nl_ops], filename, title, log_scale)
+
    def _log_callback(self, callback, filename, title=None, log_scale=False):
        """
        Log a statistic given by an OperatorStats callback member function.
        Usage:

            stats = StaticAnalysisExt(model)
            stats._log_callback(aidge_core.OperatorStats.get_nb_params, "stats.png", "Nb params per operator")

        :param callback: OperatorStats member function to call, or a list of
            such functions to plot several metrics as one stacked bar chart.
        :param filename: Output graph file name, or None to skip plotting.
        :type filename: str
        :param title: Title of the graph; defaults to the callback name(s).
        :type title: str
        :param log_scale: If True, use a logarithmic y-axis.
        :return: list of [name, value] (or [name, list_of_values]) pairs, one
            entry per non-Producer node, in topological order.
        """

        namePtrTable = self.get_graph().get_ranked_nodes_name("{0} ({1}#{3})");
        nodes = self.get_graph().get_ordered_nodes()
        series = []
        legend = None

        for node in nodes:
            # Producers hold parameters, not computations: skip them.
            if node.type() == "Producer":
                continue

            stats = self.get_op_stats(node)
            name = namePtrTable[node]
            attr = {}
            if type(node.get_operator()) is aidge_core.GenericOperatorOp:
                # Display Generic Op in orange
                attr = {'color': 'orange'}
            elif not node.get_operator().is_atomic():
                # Display Meta Op in bold
                attr = {'fontweight': 'bold'}
            elif node.type() not in aidge_core.get_keys_OperatorStats():
                # Display unsupported operator in red labels
                attr = {'color': 'red'}
            if attr:
                # Attach label styling; _log_bar/_log_barh unpack this tuple.
                name = (name, attr)
            if isinstance(callback, list):
                # Multiple metrics: one list of values per node (stacked bars).
                series.append([name, [partial(cb, stats)() for cb in callback]])
                legend = [cb.__name__ for cb in callback]
                if title is None: title = str(legend)
            else:
                series.append([name, partial(callback, stats)()])
                if title is None: title = callback.__name__

        if title is None: title = str(callback)
        if filename is not None:
            self._log_bar(series, filename, title, legend, log_scale)
        return series
+
    def _log_bar(self, series, filename, title=None, legend=None, log_scale=False):
        """
        Render ``series`` as a (possibly stacked) vertical bar chart and save
        it to ``filename``.

        :param series: list of [name, value] or [name, list_of_values] pairs;
            a name may be a (label, attrs) tuple carrying matplotlib text
            attributes ('color', 'fontweight') applied to the tick label.
        :param filename: output image path.
        :param title: optional plot title.
        :param legend: optional legend entries (one per stacked sub-series).
        :param log_scale: if True, use a logarithmic y-axis.
        """
        names, values = zip(*series)
        # Strip optional (label, attrs) tuples down to plain labels.
        names_only = [item[0] if isinstance(item, tuple) else item for item in names]
        # Widen the figure with the number of bars to keep labels readable.
        fig, ax = plt.subplots(figsize=(max(5, len(names)/4), 5))
        plt.xlim(-0.5, len(names) - 0.5)
        if isinstance(values[0], list):
            # Stacked bars: transpose to one list per metric, stack with
            # a running bottom offset.
            series = [list(i) for i in zip(*values)]
            bot = np.zeros(len(series[0]))
            for i, serie in enumerate(series):
                plt.bar(names_only, serie, bottom=bot)
                bot += serie
        else:
            plt.bar(names_only, values)
        ax.yaxis.minorticks_on()
        plt.grid(axis='y', which='major', linestyle='--', color='gray')
        plt.grid(axis='y', which='minor', linestyle=':', color='lightgray')
        # Engineering notation (k, M, G, ...) on the y-axis.
        formatter0 = matplotlib.ticker.EngFormatter(unit='')
        ax.yaxis.set_major_formatter(formatter0)
        plt.gca().set_axisbelow(True)

        # Apply the optional per-label text attributes to the tick labels.
        labels = plt.gca().get_xticks()
        tick_labels = plt.gca().get_xticklabels()
        for i, label in enumerate(labels):
            if isinstance(names[i], tuple):
                if 'color' in names[i][1]:
                    tick_labels[i].set_color(names[i][1]['color'])
                elif 'fontweight' in names[i][1]:
                    tick_labels[i].set_fontweight(names[i][1]['fontweight'])

        plt.xticks(rotation='vertical')
        if log_scale: plt.yscale('log')
        if title is not None: plt.title(title)
        if legend is not None: plt.legend(legend)
        plt.savefig(filename, bbox_inches='tight')
+
    def _log_barh(self, series, filename, title=None, legend=None, log_scale=False):
        """
        Horizontal variant of :meth:`_log_bar`: render ``series`` as a
        (possibly stacked) horizontal bar chart and save it to ``filename``.

        :param series: list of [name, value] or [name, list_of_values] pairs;
            a name may be a (label, attrs) tuple carrying matplotlib text
            attributes ('color', 'fontweight') applied to the tick label.
        :param filename: output image path.
        :param title: optional plot title.
        :param legend: optional legend entries (one per stacked sub-series).
        :param log_scale: if True, use a logarithmic x-axis.
        """
        names, values = zip(*series)
        # Strip optional (label, attrs) tuples down to plain labels.
        names_only = [item[0] if isinstance(item, tuple) else item for item in names]
        # Grow the figure height with the number of bars.
        fig, ax = plt.subplots(figsize=(10, max(5, len(names)/4)))
        plt.ylim(-0.5, len(names) - 0.5)
        if isinstance(values[0], list):
            # Stacked bars: transpose to one list per metric, stack with
            # a running left offset.
            series = [list(i) for i in zip(*values)]
            left = np.zeros(len(series[0]))
            for i, serie in enumerate(series):
                plt.barh(names_only, serie, left=left)
                left += serie
        else:
            plt.barh(names_only, values)
        ax.xaxis.minorticks_on()
        plt.grid(axis='x', which='major', linestyle='--', color='gray')
        plt.grid(axis='x', which='minor', linestyle=':', color='lightgray')
        # Engineering notation (k, M, G, ...) on the x-axis.
        formatter0 = matplotlib.ticker.EngFormatter(unit='')
        ax.xaxis.set_major_formatter(formatter0)
        plt.gca().set_axisbelow(True)
        # Put the value axis on top for horizontal charts.
        plt.gca().xaxis.set_label_position('top')
        plt.gca().xaxis.tick_top()

        # Apply the optional per-label text attributes to the tick labels.
        labels = plt.gca().get_yticks()
        tick_labels = plt.gca().get_yticklabels()
        for i, label in enumerate(labels):
            if isinstance(names[i], tuple):
                if 'color' in names[i][1]:
                    tick_labels[i].set_color(names[i][1]['color'])
                elif 'fontweight' in names[i][1]:
                    tick_labels[i].set_fontweight(names[i][1]['fontweight'])

        if log_scale: plt.xscale('log')
        if title is not None: plt.title(title)
        if legend is not None: plt.legend(legend)
        plt.savefig(filename, bbox_inches='tight')
diff --git a/aidge_core/testing/__init__.py b/aidge_core/testing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6966bbb6355c798727870ace37f99193fdda66a2
--- /dev/null
+++ b/aidge_core/testing/__init__.py
@@ -0,0 +1,12 @@
+#
+# Do not add there auto import of submodules.
+#
+# The testing module contains utils and other tools
+# related to tests, possibly reusable by other aidge
+# components unit_tests.
+#
+# Import a specific module explicitly with for instance:
+# import aidge_core.testing.utils
+# or
+# from aidge_core.testing.utils import (....,)
+#
diff --git a/aidge_core/testing/utils/__init__.py b/aidge_core/testing/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d69be0c5b80c7df229d8a24e6891970b8108fb14
--- /dev/null
+++ b/aidge_core/testing/utils/__init__.py
@@ -0,0 +1,10 @@
+#
+# Should provide some general utility functions for testing.
+# For instance:
+# - filesystem
+# - os dependencies
+# - unit tests setup
+#
+
+from .tree_cache import tree_update_from_cache
+from .tree_utils import tree_move, tree_remove
diff --git a/aidge_core/testing/utils/tree_cache.py b/aidge_core/testing/utils/tree_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bb4f7734a46c1c2ec35c573fa3f72d0cae5e736
--- /dev/null
+++ b/aidge_core/testing/utils/tree_cache.py
@@ -0,0 +1,161 @@
+"""
+
+Provide tree_update_from_cache(path) method which
+minimize changes in a generated tree when files are
+re-generated but identical.
+
+It takes as argument a generated tree, and optionally a cache path.
+Then it will update both the generated tree and the cache tree
+to take the cache version of the files when identical, or the newly
+generated one otherwise.
+
+This is in particular useful for speeding up iterative compilation
+when generating a source/build system tree.
+
+For instance:
+- first time, one generates a tree of files:
+  - generated: path/{t1,t2,t3}
+- then call tree_update_from_cache("path")
+  - will generate: __cache_path/{t1,t2,t3}
+  - and untouch: path/{t1,t2,t3}
+- second time, re-generate a tree of file:
+  - say generated files are identical: path/{t1,t2,t3}
+- then call tree_update_from_cache("path")
+  - will untouch in cache: __cache_path/{t1,t2,t3}
+  - and reset to previous timestamps files: path/{t1,t2,t3}
+- third time, re-generate again with some changes:
+  - say t1 is identical, t2 content has changed and no t3: path/{t1,t2'}
+- then call tree_update_from_cache("path")
+  - will update t2' and remove t3 in cache: __cache_path/{t1,t2'}
+  - and reset to previous timestamp t1: path/{t1,t2'}
+
+Note that by default the `dir`/__cache_`name` cache path is used
+for a given path `dir`/`name`.
+Though it is also possible to have the cache path inside the generated tree,
+in this case use for instance:
+
+    tree_update_from_cache(path, Path(path) / "__cache_src")
+
+For more evolved scenarios, specialize the provided FileTreeCache class.
+
+"""
+
+
+from pathlib import Path
+import shutil
+import sys
+import filecmp
+from typing import Optional, Union, List
+
+from .tree_utils import tree_move, tree_remove
+
+
+__all__ = [
+    "FileTreeCache",
+    "tree_update_from_cache",
+]
+
def is_relative_to(path: Path, other: Path) -> bool:
    """
    Portable equivalent of Path.is_relative_to().

    Uses the native implementation on Python >= 3.9; on older versions,
    falls back to catching the ValueError raised by Path.relative_to().
    """
    if sys.version_info >= (3, 9):
        # Native implementation available.
        return path.is_relative_to(other)
    # Pre-3.9 fallback: relative_to() raises ValueError when not relative.
    try:
        path.relative_to(other)
    except ValueError:
        return False
    return True
+
+
class FileTreeCache():
    """
    Class for implementation of the file tree cache.
    Can be derived to changes for instance default cache name/tmp name prefixes
    or to specialize for other contexts.
    """
    # Prefixes used to derive the default cache directory and the two
    # temporary working directories from the source tree name.
    default_cache_prefix = "__cache_"
    default_tmp_cache_prefix = "__tmp_cache_"
    default_tmp_prefix = "__tmp_"

    def __init__(self,
                 src_path: Union[str, Path],
                 cache_path: Optional[Union[str, Path]] = None
                 ) -> None:
        # Source tree to refresh; the cache defaults to a sibling
        # "__cache_<name>" directory next to the source tree.
        self.src_path = Path(src_path).absolute()
        self.cache_path = (
            Path(cache_path).absolute()
            if cache_path is not None else
            (self.src_path.parent /
             f"{self.default_cache_prefix}{self.src_path.name}")
        )
        ctx_msg = f"tree_cache: {src_path = }, {cache_path = }"
        assert self.src_path != self.cache_path, f"src_path and cache_path must differ on {ctx_msg}"
        assert not is_relative_to(self.src_path, self.cache_path), f"src_path must not be relative to cache_path on {ctx_msg}"
        # Temporary locations used while swapping the source and cache trees
        # during update_from_cache().
        self._tmp_path = (
            self.src_path.parent /
            f"{self.default_tmp_prefix}{self.src_path.name}")
        self._tmp_cache_path = (
            self.src_path.parent /
            f"{self.default_tmp_cache_prefix}{self.src_path.name}")

    @classmethod
    def _copytree_or_cache(cls, src_dir: Path, dst_dir: Path, cache_dir: Path, dst_cache_dir: Path) -> None:
        # Copy src_dir to dst_dir while rebuilding the cache in dst_cache_dir:
        # for each file, when a content-identical copy exists in cache_dir,
        # that cached version (with its older time stamps) is written to both
        # destinations; otherwise the freshly generated file is used.
        assert not dst_dir.exists()
        assert not dst_cache_dir.exists()
        assert src_dir.is_dir()
        assert not cache_dir.exists() or cache_dir.is_dir()
        assert not is_relative_to(cache_dir, src_dir)

        def copy_or_cache(src, dst):
            base_src = Path(src).relative_to(src_dir)
            cache_src = cache_dir / base_src
            base_dst = Path(dst).relative_to(dst_dir)
            cache_dst = dst_cache_dir / base_dst
            cache_dst.parent.mkdir(parents=True, exist_ok=True)
            # shallow=False: compare actual file contents, not just stat info.
            if cache_src.exists() and filecmp.cmp(str(src), str(cache_src), shallow=False):
                shutil.copy2(str(cache_src), str(cache_dst))
                shutil.copy2(str(cache_src), dst)
            else:
                shutil.copy2(src, str(cache_dst))
                shutil.copy2(src, dst)
        shutil.copytree(str(src_dir), str(dst_dir), copy_function=copy_or_cache)

    def update_from_cache(self) -> None:
        # Swap the source tree and the cache through temporaries, then
        # rebuild both, reusing cached files whose contents are unchanged.
        assert self.src_path.exists(), f"src path must exist before swapping with cache"

        # Move cache path apart first as it may be relative to source path
        tree_move(self.cache_path, self._tmp_cache_path, ignore_missing=True, exist_ok=True)
        # Move source path apart before recreating merged source tree
        tree_move(self.src_path, self._tmp_path, exist_ok=True)

        # Manage the source/cache merge to the dst/dst_cache with a variant of
        # copytree.
        self._copytree_or_cache(
            src_dir=self._tmp_path,
            dst_dir=self.src_path,
            cache_dir=self._tmp_cache_path,
            dst_cache_dir=self.cache_path,
        )

        # Remove tmp source path
        tree_remove(self._tmp_path)
        # Note that the tmp cache path may not exist
        tree_remove(self._tmp_cache_path, ignore_missing=True)
+
+
def tree_update_from_cache(
        src_path: Union[str, Path],
        cache_path: Optional[Union[str, Path]] = None) -> None:
    """
    Refresh the generated tree at ``src_path`` against its cache, keeping the
    cached (older) file stamps whenever file contents are unchanged.

    :param src_path: str or Path object to the generated tree
    :param cache_path: optional str or Path object to the cache location;
        defaults to ``src_path.parent / f"__cache_{src_path.name}"``
    """
    cache = FileTreeCache(src_path, cache_path)
    cache.update_from_cache()
diff --git a/aidge_core/testing/utils/tree_utils.py b/aidge_core/testing/utils/tree_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..990ab2641f5aa914a9d8c03105c00d8c05d1243f
--- /dev/null
+++ b/aidge_core/testing/utils/tree_utils.py
@@ -0,0 +1,82 @@
+"""
+
+Provide utility function for file trees manipulations.
+
+"""
+
+import shutil
+import sys
+from pathlib import Path
+from typing import Union
+
+
+__all__ = [
+    "tree_move",
+    "tree_remove",
+]
+
+
def is_relative_to(path: Path, other: Path) -> bool:
    """
    Version-portable Path.is_relative_to().

    Delegates to the native method on Python >= 3.9; otherwise emulates it
    by checking whether Path.relative_to() raises ValueError.
    """
    if sys.version_info >= (3, 9):
        # Native implementation available.
        return path.is_relative_to(other)
    # Pre-3.9 fallback: relative_to() raises ValueError when not relative.
    try:
        path.relative_to(other)
    except ValueError:
        return False
    return True
+
+
def tree_remove(
        path: Union[str, Path],
        ignore_missing: bool = False,
) -> None:
    """
    Recursively delete the whole directory tree rooted at ``path``.

    :param path: str or Path object to the directory to delete
    :param ignore_missing: when True, silently return if ``path`` does
        not exist; when False, a missing path is an error
    """
    path = Path(path)
    ctx_msg = f"tree_remove: : {path = }"
    missing = not path.exists()
    assert ignore_missing or not missing, f"path must exists when ignore_missing is False on {ctx_msg}"
    if ignore_missing and missing:
        return
    shutil.rmtree(path)
+
+
def tree_move(
        src_path: Union[str, Path],
        dst_path: Union[str, Path],
        ignore_missing: bool = False,
        exist_ok: bool = False,
) -> None:
    """
    Move the whole src_path file tree to dst_path.
    Optionally does nothing if the src path does not exist and ignore_missing is True.
    Optionally the full dst_path will be removed first when exist_ok is True.

    :param src_path: str or Path object to the source directory path
    :param dst_path: str or Path object to the new path name for the source directory
    :param ignore_missing: if True, returns early if src_path does not exist
    :param exist_ok: if True, first erases the destination path if it exists
    """
    src_path = Path(src_path)
    dst_path = Path(dst_path)
    ctx_msg = f"tree_move: : {src_path = }, {dst_path = }"
    assert ignore_missing or src_path.exists(), f"src_path must exists when ignore_missing is False on {ctx_msg}"
    assert exist_ok or not dst_path.exists(), f"dst_path must not exists when exist_ok is False on {ctx_msg}"
    assert src_path != dst_path, f"paths must not be identical on {ctx_msg}"
    # Guard against moving a tree into itself (or onto one of its own
    # parents), which would corrupt or lose data.
    assert not is_relative_to(dst_path, src_path), f"dst_path must not be relative to src_path on {ctx_msg}"
    assert not is_relative_to(src_path, dst_path), f"src_path must not be relative to dst_path on {ctx_msg}"
    if ignore_missing and not src_path.exists():
        return
    if exist_ok and dst_path.exists():
        shutil.rmtree(dst_path)
    # Compatibility fix: shutil.move() only documents accepting path-like
    # objects for both arguments since Python 3.9, and this module explicitly
    # supports 3.8 (see is_relative_to fallback) — pass plain strings.
    shutil.move(str(src_path), str(dst_path))
diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py
index 5d2e700a86925d1455cdee83e7d40cd891e72ba6..d98a6fdbc20dd7e99169422205a4e680350aed27 100644
--- a/aidge_core/unit_tests/test_export.py
+++ b/aidge_core/unit_tests/test_export.py
@@ -8,8 +8,6 @@ http://www.eclipse.org/legal/epl-2.0.
 SPDX-License-Identifier: EPL-2.0
 """
 
-import aidge_core
-from aidge_core.utils import run_command
 import unittest
 import os
 import pathlib
@@ -18,6 +16,10 @@ import subprocess
 import sys
 
 
+import aidge_core
+from aidge_core.utils import run_command
+from aidge_core.testing.utils import tree_update_from_cache, tree_move, tree_remove
+
 def initFiller(model):
     # Initialize parameters (weights and biases)
     for node in model.get_nodes():
@@ -26,6 +28,8 @@ def initFiller(model):
             value = prod_op.get_output(0)
             value.set_backend("cpu")
             tuple_out = node.output(0)[0]
+            # Force seed before filler for reproducibility
+            aidge_core.random.Generator.set_seed(0)
             # No conv in current network
             if tuple_out[0].type() == "Conv" and tuple_out[1] == 1:
                 # Conv weight
@@ -43,22 +47,6 @@ def initFiller(model):
                 pass
 
 
-def clean_dir(dir: pathlib.Path) -> None:
-    if not dir.is_dir():
-        print(f"Error : directory {dir} doesn't exist. Exiting clean_dir().")
-        return
-    for filename in os.listdir(dir):
-        file_path = os.path.join(dir, filename)
-        try:
-            if os.path.isfile(file_path) or os.path.islink(file_path):
-                os.unlink(file_path)
-            elif os.path.isdir(file_path):
-                shutil.rmtree(file_path)
-        except Exception as e:
-            print(f"Failed to delete {file_path}. Reason: {e}")
-    return
-
-
 class test_export(unittest.TestCase):
     """Test aidge export"""
 
@@ -66,6 +54,10 @@ class test_export(unittest.TestCase):
         self.EXPORT_PATH: pathlib.Path = pathlib.Path("dummy_export")
         self.BUILD_DIR: pathlib.Path = self.EXPORT_PATH / "build"
         self.INSTALL_DIR: pathlib.Path = (self.EXPORT_PATH / "install").absolute()
+        self.TMP_BUILD_DIR: pathlib.Path = (
+            self.EXPORT_PATH.parent /
+            f"__tmp_{self.EXPORT_PATH.name}_build"
+        )
 
     def tearDown(self):
         pass
@@ -76,28 +68,41 @@ class test_export(unittest.TestCase):
         model = aidge_core.sequential(
             [
                 aidge_core.FC(
-                    in_channels=32 * 32 * 3, out_channels=512, name="InputNode"
+                    in_channels=32 * 32 * 3, out_channels=64, name="InputNode"
                 ),
                 aidge_core.ReLU(name="Relu0"),
-                aidge_core.FC(in_channels=512, out_channels=256, name="FC1"),
+                aidge_core.FC(in_channels=64, out_channels=32, name="FC1"),
                 aidge_core.ReLU(name="Relu1"),
-                aidge_core.FC(in_channels=256, out_channels=128, name="FC2"),
+                aidge_core.FC(in_channels=32, out_channels=16, name="FC2"),
                 aidge_core.ReLU(name="Relu2"),
-                aidge_core.FC(in_channels=128, out_channels=10, name="OutputNode"),
+                aidge_core.FC(in_channels=16, out_channels=10, name="OutputNode"),
             ]
         )
 
         initFiller(model)
+        model.forward_dims([[1, 32*32*3]])
 
-        # Export model
-        aidge_core.export(self.EXPORT_PATH, model)
+        # Preserve previously generated build if present
+        tree_move(self.BUILD_DIR, self.TMP_BUILD_DIR, ignore_missing=True, exist_ok=True)
+        # Clean install dir
+        tree_remove(self.INSTALL_DIR, ignore_missing=True)
 
-        self.assertTrue(
-            self.EXPORT_PATH.is_dir(), "Export folder has not been generated"
+        # Export model
+        aidge_core.serialize_to_cpp(self.EXPORT_PATH, model)
+        self.assertTrue(self.EXPORT_PATH.is_dir(), "Export folder has not been generated")
+        # Add other source files
+        shutil.copyfile(pathlib.Path(__file__).parent / "static/main.cpp", self.EXPORT_PATH / "main.cpp")
+
+        # Use cache if any, put cache inside export dir
+        # such that cleaning export dir also cleans the cache
+        tree_update_from_cache(
+            self.EXPORT_PATH,
+            cache_path=self.EXPORT_PATH / "__cache_export"
         )
-        os.makedirs(self.BUILD_DIR, exist_ok=True)
-        clean_dir(self.BUILD_DIR)  # if build dir existed already ensure its emptyness
-        clean_dir(self.INSTALL_DIR)
+
+        # Move back preserved build dir if any and ensure build dir exists
+        tree_move(self.TMP_BUILD_DIR, self.BUILD_DIR, ignore_missing=True)
+        self.BUILD_DIR.mkdir(exist_ok=True)
 
         # Test compilation of export
         search_path = (
@@ -106,11 +111,6 @@ class test_export(unittest.TestCase):
             else os.environ["AIDGE_INSTALL"]
         )
 
-        shutil.copyfile(
-            pathlib.Path(__file__).parent / "static/main.cpp",
-            self.EXPORT_PATH / "main.cpp",
-        )
-
         ##########################
         # CMAKE EXPORT
         try:
diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 26d60f2fbaf0f3903baf191cf0a2ad5550fb3275..4d3b0a34485e7e0d7e0323f2fc121a83b1882de9 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -49,8 +49,8 @@ class test_OperatorImpl(unittest.TestCase):
         """Test registering an implementation
         """
         global GLOBAL_CPT
-        aidge_core.register_ConvOp2D("cpu", testImpl)
-        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        aidge_core.register_Conv2DOp("cpu", testImpl)
+        self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp())
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         conv.get_operator().set_backend("cpu")
         conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
@@ -61,9 +61,9 @@ class test_OperatorImpl(unittest.TestCase):
         """Test registering an implementation
         """
         global GLOBAL_CPT
-        aidge_core.register_ConvOp2D("cpu", testImpl)
+        aidge_core.register_Conv2DOp("cpu", testImpl)
         aidge_core.register_ProducerOp("cpu", testImpl)
-        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        self.assertTrue("cpu" in aidge_core.get_keys_Conv2DOp())
         conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
         model = aidge_core.sequential([conv])
         model.set_backend("cpu")
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 8d6f2686d9010ac4ebed80cd04f74effe763e977..21b62e92b501215fc51651b7ca7a7bcb5237b027 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -101,7 +101,7 @@ class test_operator_binding(unittest.TestCase):
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
         op = aidge_core.GenericOperatorOp("any_type", 1,0,1)
-        with self.assertRaises(RuntimeError):
+        with self.assertRaises(IndexError):
             op.attr.something
 
         op.attr.something = aidge_core.DynamicAttributes()
diff --git a/aidge_core/unit_tests/test_recipes.py b/aidge_core/unit_tests/test_recipes.py
index c8dd4c727fbaf8224e8d04111a5054caeb5e5c99..f4dd0220ecdc5950e1b1dcef0d8bf2d4782216bf 100644
--- a/aidge_core/unit_tests/test_recipes.py
+++ b/aidge_core/unit_tests/test_recipes.py
@@ -46,9 +46,9 @@ class test_recipes(unittest.TestCase):
 
     def test_fuse_matmul_add(self):
         matmul0 = aidge_core.MatMul(name="MatMul0")
-        add0 = aidge_core.Add(2, name="Add0")
+        add0 = aidge_core.Add(name="Add0")
         matmul1 = aidge_core.MatMul(name="MatMul1")
-        add1 = aidge_core.Add(2, name="Add1")
+        add1 = aidge_core.Add(name="Add1")
         w0 = aidge_core.Producer([1, 1], name="W0")
         w0.add_child(matmul0, 0, 0)
         b0 = aidge_core.Producer([1], name="B0")
diff --git a/aidge_core/unit_tests/test_show_graphview.py b/aidge_core/unit_tests/test_show_graphview.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c68e93e39a543e96c2b664dbe554660bf37cc91
--- /dev/null
+++ b/aidge_core/unit_tests/test_show_graphview.py
@@ -0,0 +1,129 @@
+import json
+import tempfile
+import unittest
+import builtins
+import aidge_core
+from pathlib import Path
+from aidge_core.show_graphview import gview_to_json
+
+def create_gview():
+    # Create a LeNet-like model
+    gview = aidge_core.sequential([aidge_core.PaddedConv2D(in_channels=1, out_channels=6, kernel_dims=[5,5], name='feature_feature_0_Conv', stride_dims=[1,1], padding_dims = [2,2,2,2]),
+                               aidge_core.ReLU(name='feature_feature_1_Relu'),
+                               aidge_core.MaxPooling2D(kernel_dims=[2,2], stride_dims=[2,2], ceil_mode=0, name='feature_feature_2_MaxPool'),
+                               aidge_core.Conv2D(in_channels=6, out_channels=16, kernel_dims=[5,5], name='feature_feature_3_Conv', stride_dims=[1,1], dilation_dims = [1,1]),
+                               aidge_core.ReLU(name='feature_feature_4_Relu'),
+                               aidge_core.MaxPooling2D(kernel_dims=[2,2], stride_dims=[2,2], ceil_mode=0, name='feature_feature_5_MaxPool'),
+                               aidge_core.FC(in_channels=400, out_channels=120, name='classifier_classifier_1_Gemm'),
+                               aidge_core.ReLU(name='classifier_classifier_2_Relu'),
+                               aidge_core.FC(in_channels=120, out_channels=84, name='classifier_classifier_3_Gemm'),
+                               aidge_core.ReLU(name='classifier_classifier_4_Relu'),
+                               aidge_core.FC(in_channels=84, out_channels=10, name='classifier_classifier_5_Gemm'),
+                            ])
+
+    # Fill Producers
+    for node in gview.get_nodes():
+        if node.type() == "Producer":
+            prod_op = node.get_operator()
+            value = prod_op.get_output(0)
+            value.set_backend("cpu")
+            tuple_out = node.output(0)[0]
+            
+            if (tuple_out[0].type() == "Conv" or tuple_out[0].type() == "PaddedConv") and tuple_out[1]==1:
+                # Conv weight
+                aidge_core.xavier_uniform_filler(value)
+            elif tuple_out[0].type() == "Conv" and tuple_out[1]==2:
+                # Conv bias
+                aidge_core.constant_filler(value, 0.01)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==1:
+                # FC weight
+                aidge_core.normal_filler(value)
+            elif tuple_out[0].type() == "FC" and tuple_out[1]==2:
+                # FC bias
+                aidge_core.constant_filler(value, 0.01)
+            else:
+                pass
+
+    # Compile model
+    gview.forward_dims([[1, 1, 28, 28]]) 
+    gview.set_datatype(aidge_core.dtype.float32)
+
+    return gview
+
+class test_show_gview(unittest.TestCase):
+    """Test aidge functionality to show GraphView.    
+    """
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_gview_to_json(self):
+        
+        gview = create_gview()
+
+        # Create temporary file to store JSON model description             
+        model_description_file = tempfile.NamedTemporaryFile(mode="w+", suffix='.json')
+
+        gview_to_json(gview, Path(model_description_file.name))
+
+        # Load JSON
+        with open(model_description_file.name, 'r') as fp:
+                model_json = json.load(fp)
+
+        # Get list of nodes of Aidge graphview
+        gview_ordered_nodes = gview.get_ordered_nodes()
+
+        # Iterate over the list of ordered nodes and the corresponding JSON 
+        self.assertEqual(len(gview_ordered_nodes), len(model_json['graph']))
+
+        for node_gview, node_json in zip(gview_ordered_nodes, model_json['graph']):   
+                    
+            self.assertEqual(node_gview.get_operator().type(), node_json['optype'])
+            self.assertEqual(node_gview.get_operator().nb_inputs(), node_json['nb_inputs'])
+            self.assertEqual(node_gview.get_operator().nb_outputs(), node_json['nb_outputs'])
+            
+            self.assertEqual(node_gview.get_operator().nb_inputs(), len(node_json['inputs']))
+            for input_idx in range(node_gview.get_operator().nb_inputs()):
+                self.assertEqual(node_gview.get_operator().get_input(input_idx).dims(), node_json['inputs'][input_idx]['dims'])
+                self.assertEqual(str(node_gview.get_operator().get_input(input_idx).dtype()), node_json['inputs'][input_idx]['data_type'])
+                self.assertEqual(str(node_gview.get_operator().get_input(input_idx).dformat()), node_json['inputs'][input_idx]['data_format'])
+
+            self.assertEqual(node_gview.get_operator().nb_outputs(), len(node_json['outputs']))
+            for output_idx in range(node_gview.get_operator().nb_outputs()):
+                self.assertEqual(node_gview.get_operator().get_output(output_idx).dims(), node_json['outputs'][output_idx]['dims'])
+                self.assertEqual(str(node_gview.get_operator().get_output(output_idx).dtype()), node_json['outputs'][output_idx]['data_type'])
+                self.assertEqual(str(node_gview.get_operator().get_output(output_idx).dformat()), node_json['outputs'][output_idx]['data_format'])
+
+            self.assertEqual(len(node_gview.get_parents()), len(node_json['parents']))                  
+            self.assertEqual(len(node_gview.get_children()), len(node_json['children']))
+
+            if not hasattr(node_gview.get_operator(), 'get_micro_graph'):
+                try:
+                    self.assertEqual(len(node_gview.get_operator().attr.dict()), len(node_json['attributes']))
+                    self.assertDictEqual(node_gview.get_operator().attr.dict(), node_json['attributes'])
+
+                except AttributeError:
+                    self.assertIsNone(node_gview.get_operator().attr); self.assertFalse(node_json['attributes'])
+
+            elif hasattr(node_gview.get_operator(), 'get_micro_graph'):
+                
+                self.assertEqual(len(node_gview.get_operator().get_micro_graph().get_nodes()), len(node_json['attributes']['micro_graph']))
+                
+                for micro_node_gview in node_gview.get_operator().get_micro_graph().get_nodes():
+                    for micro_node_json in node_json['attributes']['micro_graph']:
+                        if micro_node_gview.get_operator().type() == micro_node_json['optype']:
+                            
+                            for key, value in micro_node_gview.get_operator().attr.dict().items():
+                                if not type(value).__name__ in dir(builtins):
+                                    # Replace original value by its name (str) because value is of a type that could not be written to the JSON
+                                    # Cannot update this dict inplace : micro_node_gview.get_operator().attr.dict().update({key : value.name}) 
+                                    temp_mnode_dict = micro_node_gview.get_operator().attr.dict()
+                                    temp_mnode_dict.update({key : value.name})
+                                    self.assertDictEqual(temp_mnode_dict, micro_node_json['attributes'])                
+                    
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/aidge_core/unit_tests/test_topological_order.py b/aidge_core/unit_tests/test_topological_order.py
index 8e7f2e2d9b9770c2fae1e5c2812ba33113589134..01a69409e86c486ec2fb8c8bdb2a18ab0e3d9c1c 100644
--- a/aidge_core/unit_tests/test_topological_order.py
+++ b/aidge_core/unit_tests/test_topological_order.py
@@ -29,7 +29,7 @@ class test_topological_order(unittest.TestCase):
         loop0.get_operator().set_back_edges({1})
         assert not loop0.get_operator().is_back_edge(0)
         assert loop0.get_operator().is_back_edge(1)
-        add0 = aidge_core.Add(2, "add0")
+        add0 = aidge_core.Add("add0")
 
         loop0.add_child(add0, 0, 1)
         add0.add_child(loop0, 0, 1)
@@ -50,7 +50,7 @@ class test_topological_order(unittest.TestCase):
         loop0.get_operator().set_back_edges({0})
         assert not loop0.get_operator().is_back_edge(1)
         assert loop0.get_operator().is_back_edge(0)
-        add0 = aidge_core.Add(2, "add0")
+        add0 = aidge_core.Add("add0")
 
         loop0.add_child(add0, 0, 1)
         add0.add_child(loop0, 0, 0)
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index cadd8c85ca541862cc6f298fa055713a6f65e3ed..a06202696951a888284f1e0a50b6c25b677fe7f3 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -41,6 +41,7 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/Clip.hpp"
 #include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
@@ -51,6 +52,8 @@
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/GlobalAveragePooling.hpp"
 #include "aidge/operator/GridSample.hpp"
+#include "aidge/operator/Heaviside.hpp"
+#include "aidge/operator/ILayerNorm.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/MetaOperator.hpp"
@@ -65,7 +68,10 @@
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
 #include "aidge/operator/Resize.hpp"
+#include "aidge/operator/Round.hpp"
 #include "aidge/operator/Shape.hpp"
+#include "aidge/operator/ShiftMax.hpp"
+#include "aidge/operator/ShiftGELU.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
 #include "aidge/operator/Softmax.hpp"
@@ -76,9 +82,7 @@
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/stimuli/Stimulus.hpp"
 
-#include "aidge/operator/ShiftMax.hpp"
-#include "aidge/scheduler/ShiftGELU.hpp"
-#include "aidge/stimuli/ILayerNorm.hpp"
+
 
 #include "aidge/recipes/Recipes.hpp"
 
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 4af7da64ebca3c02eb9aabca1f2dad88fd8b9829..8a6684b22b4c4659353fa5b5dee2b0820c46a11f 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -28,7 +28,7 @@ class Operator;
 
 /**
  * @brief ImplSpec stores the requirements or the specifications of an implementation.
- * 
+ *
  */
 struct ImplSpec {
     struct IOSpec {
@@ -73,10 +73,15 @@ inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) {
         || (lhs.inputs == rhs.inputs && lhs.outputs == rhs.outputs && lhs.attrs < rhs.attrs);
 }
 
+
+inline bool operator==(const ImplSpec& lhs, const ImplSpec& rhs) {
+    return !(lhs < rhs) && !(rhs < lhs);
+}
+
 /**
  * @brief Impl stores the details of a specific implementation.
  * It is associated to a ImplSpec in a registry.
- * 
+ *
  */
 template <class FwdFunc, class BwdFunc>
 struct Impl {
@@ -108,7 +113,7 @@ public:
     /**
      * @brief Get the operator required implementation specification, according
      * to the current operator configuration.
-     * 
+     *
      */
     ImplSpec getRequiredSpec() const;
 
@@ -116,15 +121,15 @@ public:
      * @brief Get the best implementation that matches \p requiredSpecs.
      * If no implementation matches \p requiredSpecs, \p requiredSpecs is
      * returned.
-     * 
+     *
      */
     ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Get an adapted meta operator corresponding to the required 
+     * @brief Get an adapted meta operator corresponding to the required
      * specifications \p requiredSpecs from the implementation specifications
      * \p spec.
-     * 
+     *
      * @param spec Implementation specification
      * @param requiredSpecs Required specifications
      * @return std::shared_ptr<Node> Adapted meta op or nullptr
@@ -132,12 +137,12 @@ public:
     std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Get the best adapted meta operator corresponding to the required 
+     * @brief Get the best adapted meta operator corresponding to the required
      * specifications \p requiredSpecs.
      * The best adaptation is the one with the lowest overhead cost.
-     * Currently, it is the one requiring the least number of additionnal 
+     * Currently, it is the one requiring the least number of additional
      * operators to match the available implementations.
-     * 
+     *
      * @param requiredSpecs Required specifications
      * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
@@ -147,7 +152,7 @@ public:
 
 protected:
     virtual std::shared_ptr<ProdConso> getProdConso() const;
-    virtual std::set<ImplSpec> getAvailableImplSpecs() const;
+    virtual std::vector<ImplSpec> getAvailableImplSpecs() const;
     bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const;
 
     const Operator &mOp;
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 57c6c385d5fdcc9f2439983bd04cc8ece0d8d8f5..864789c19181b52351fc09a63a787feaed31a216 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -144,7 +144,7 @@ public:
 
     /**
      * Return the raw device pointer.
-     * The raw pointer is garanteed to be valid only on the *same* device.
+     * The raw pointer is guaranteed to be valid only on the *same* device.
      * @param offset Offset, in number of elements.
     */
     virtual void* rawPtr(NbElts_t offset = 0) = 0;
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 6454ed233c561e386199e4db40ca698ee9edad8a..d04624fc530a21730cc4dc1f4f1ac90a58e6590b 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -23,7 +23,7 @@ namespace Aidge {
 
 template <class T>
 class TensorImpl_cpu : public TensorImpl {
-    static_assert(std::is_trivially_copyable<T>::value, "TensorImpl type should be trivially copyable");
+    static_assert(std::is_trivially_copyable<T>::value, "TensorImpl type should be trivially copyable");
 
 private:
     /// Pointer to the data and its capacity
@@ -50,10 +50,10 @@ public:
     void zeros() override final;
 
     void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        AIDGE_ASSERT(offset + length <= mNbElts, "TensorImpl_cpu<{}>::copy(): copy offset ({}) + length ({}) is above capacity ({})", typeid(T).name(), offset, length, mNbElts);
         const T* srcT = static_cast<const T *>(src);
         T* dstT = static_cast<T *>(rawPtr(offset));
 
-        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "TensorImpl_cpu<{}>::copy(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
         AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "TensorImpl_cpu<{}>::copy(): overlapping copy is not supported", typeid(T).name());
         std::copy(srcT, srcT + length, dstT);
     }
@@ -72,7 +72,7 @@ public:
 
     void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
         const T* src = static_cast<const T*>(rawPtr(offset));
-        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "TensorImpl_cpu<{}>::copyToHost(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
+        AIDGE_ASSERT(offset + length <= mData.size(), "TensorImpl_cpu<{}>::copyToHost(): copy offset ({}) + length ({}) is above capacity ({})", typeid(T).name(), offset, length, mData.size());
         std::copy(src, src + length, static_cast<T *>(dst));
     }
 
@@ -119,30 +119,17 @@ private:
 template <typename T>
 const std::string TensorImpl_cpu<T>::Backend = "cpu";
 
-namespace {
-static Registrar<Tensor> registrarTensorImpl_cpu_Float64(
-        {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
-        {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
-        {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
-        {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<int64_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
-        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
-        {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
-        {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_UInt64(
-        {"cpu", DataType::UInt64}, Aidge::TensorImpl_cpu<uint64_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_UInt32(
-        {"cpu", DataType::UInt32}, Aidge::TensorImpl_cpu<uint32_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
-        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
-        {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
-}  // namespace
+REGISTRAR(Tensor, {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<int64_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::UInt64}, Aidge::TensorImpl_cpu<uint64_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::UInt32}, Aidge::TensorImpl_cpu<uint32_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
+REGISTRAR(Tensor, {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 23221e653ba725e4463b06cfabb5483a20756701..a34718296e4ccddbfca0b4eb0daf14b08124389a 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -20,6 +20,7 @@
 
 #include "aidge/data/half.hpp"
 #include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 enum class DataType {
@@ -52,6 +53,9 @@ enum class DataType {
     Any
 };
 
+bool isDataTypeFloatingPoint(const DataType& type);
+size_t getDataTypeBitWidth(const DataType& type);
+
 enum class DataFormat {
     Default,
     NCHW,
@@ -80,7 +84,7 @@ constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
 /**
  * Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
  * @param src Source DataFormat
- * @param dst Destinatin DataFormat
+ * @param dst Destination DataFormat
  * @return DataFormatTranspose Permutation array to achieve a transposition
  *         from src to dst DataFormat.
 */
@@ -88,7 +92,19 @@ DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataForm
 
 class Data {
 public:
+    Data() = delete;
+    Data(Data&& other) = default;
+    Data(const Data& other) = default;
     Data(const std::string& type): mType(type) {};
+
+    Data& operator=(const Data& other) {
+        AIDGE_ASSERT(other.mType == mType, "Cannot copy a different type of Data object.");
+        return *this;
+    };
+    Data& operator=(Data&& other) {
+        AIDGE_ASSERT(other.mType == mType, "Cannot copy a different type of Data object.");
+        return *this;
+    };
     constexpr const std::string& type() const {
         return mType;
     }
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
index 6c19b5355e406454a2e20bc8994d0ab04d53576a..ad145ca393e4d88210dbda98ab1cb8b37a2480ba 100644
--- a/include/aidge/data/DataProvider.hpp
+++ b/include/aidge/data/DataProvider.hpp
@@ -25,7 +25,7 @@ namespace Aidge {
 /**
  * @brief Data Provider. Takes in a database and compose batches by fetching data from the given database.
  * @todo Implement Drop last batch option. Currently returns the last batch with less elements in the batch.
- * @todo Implement readRandomBatch to compose batches from the database with a random sampling startegy. Necessary for training.
+ * @todo Implement readRandomBatch to compose batches from the database with a random sampling strategy. Necessary for training.
  */
 class DataProvider {
 private:
diff --git a/include/aidge/data/Interpolation.hpp b/include/aidge/data/Interpolation.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d1af3e831617660356fe48d7d5665564f125c21d
--- /dev/null
+++ b/include/aidge/data/Interpolation.hpp
@@ -0,0 +1,151 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_UTILS_INTERPOLATION_H_
+#define AIDGE_CORE_UTILS_INTERPOLATION_H_
+
+#include <cstdint>  // std::int64_t
+#include <utility>  // std::pair
+#include <vector>
+
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/* @brief generic class to hold interpolation */
+class Interpolation {
+  public:
+    /**
+     * @brief simple type alias to describe coordinates
+     * @note the indexes are deliberately chosen to be signed values as some
+     * points retrieved by interpolation are out of bound, hence their coords
+     * can be < 0
+     */
+    using Coords = std::vector<std::int64_t>;
+    /**
+     * @brief type alias to designate a point of any type : hence coordinates &
+     * associated value
+     */
+    template <class T> using Point = std::pair<Coords, T>;
+
+    /**
+     * @brief details how coordinates are transformed from interpolated tensor
+     * to original tensor
+     */
+    enum CoordinateTransformation {
+        HalfPixel,
+        HalfPixelSymmetric,
+        PytorchHalfPixel,
+        AlignCorners,
+        Asymmetric,
+    };
+
+    /**
+     * @brief apply transformation to coords in interpolated Tensor to find
+     * equivalent coordinates in original tensor reference frame.
+     * @warning it is assumed that all parameters have the same
+     * number of dimensions.
+     * @param[in] transformedCoords : coords in interpolated tensor
+     * @param[in] inputDims: input dimensions of tensor
+     * @param[in] outputDims: output dimensions of tensor
+     * @return std::vector containing coords in original tensor reference frame
+     */
+    static std::vector<float> untransformCoordinates(
+        const std::vector<DimSize_t> &transformedCoords,
+        const std::vector<DimSize_t> &inputDims,
+        const std::vector<DimSize_t> &outputDims,
+        const Interpolation::CoordinateTransformation coordTransfoMode);
+
+    /**
+     * @brief retrieves neighbouring value of a given index
+     * @param[in] tensorValues raw pointer of the tensor values
+     * retrieved with
+     * @code
+     * tensor->getImpl()->rawPtr()
+     * @endcode
+     * @param[in] tensorDimensions dimensions of given tensor
+     * retrieved with
+     * @code
+     * tensor->dims()
+     * @endcode
+     * @param[in] coords coordinates in the tensor of the values we want to
+     * find the neighbours of.
+     * @return std::set<Point<T>>
+     * containing both indexes of neighbours & their values
+     */
+    template <typename T>
+    static std::set<Point<T>>
+    retrieveNeighbours(const T *tensorValues,
+                       const std::vector<DimSize_t> &tensorDims,
+                       const std::vector<float> &coords,
+                       const PadBorderType paddingMode = PadBorderType::Zero);
+
+    /* @brief interpolation type */
+    enum Mode {
+        Cubic,
+        Linear,
+        RoundPreferFloor,
+        RoundPreferCeil,
+        Floor,
+        Ceil
+    };
+
+    /*
+     * @brief Interpolates values given via input in given mode.
+     *
+     * @warning This function is empty and is meant to be overridden in derived
+     * class in backend libraries.
+     *
+     * Values are contiguously arranged in a "square" shape around the point to
+     * interpolate. Depending on interpolation mode.
+     * The point that will be interpolated is located right in the
+     * middle of all points.
+     * Immediate neighbours :
+     * 1D interp :     2D interp :
+     *                 . . . . . .
+     * . . 1 2 . .     . . . . . .
+     *                 . . 1 2 . .
+     *                 . . 3 4 . .
+     *                 . . . . . .
+     *                 . . . . . .
+     *
+     * 2 neighbours :
+     * 1D interp :         2D interp :
+     *                   .  .  .  .  .  .  . .
+     *                   .  .  .  .  .  .  . .
+     * . . 1 2 3 4 . .   .  .  1  2  3  4  . .
+     *                   .  .  5  6  7  8  . .
+     *                   .  .  9 10 11 12  . .
+     *                   .  . 13 14 15 16  . .
+     *                   .  .  .  .  .  .  . .
+     *                   .  .  .  .  .  .  . .
+     *
+     * @param[in] originalIndex: index of the point to in the original picture
+     * Since the coord are being transformed from the interpolatedTensor frame
+     * to originalTensor frame, the result might be in float.
+     * @param[in] points : points to interpolate, arranged in a vector of a
+     * pairs ((point_coord), value) :
+     * [[[X1, X2, ..., XN], Xval], ...., [[A1, A2, ..., AN],Aval]].
+     * With :
+     * - N: the number of dimensions.
+     * - A: the number of points of the grid to interpolate.
+     * - All coordinates expressed in originalTensor frame.
+     * @param[in] interpMode: interpolation mode
+     * @return interpolated value
+     */
+    template <typename T>
+    [[noreturn]] static T interpolate(const std::vector<float> &originalIndex,
+                                      const std::vector<Point<T>> &points,
+                                      const Mode interpMode);
+};
+} // namespace Aidge
+
+#endif
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 58e893ca5d5339d93799415f076dd69d54db69ca..627a5a4784b4e6546cdfc96b65acbe2a39ee119c 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -12,6 +12,7 @@
 #ifndef AIDGE_CORE_DATA_TENSOR_H_
 #define AIDGE_CORE_DATA_TENSOR_H_
 
+#include <algorithm>
 #include <cstddef>      // std::size_t
 #include <cstring>
 #include <functional>   // std::multiplies
@@ -22,12 +23,14 @@
 #include <type_traits>  // std::is_arithmetic
 #include <vector>
 
+#include <fmt/core.h>
+
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
-
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/utils/ArrayHelpers.hpp"
 
 namespace Aidge {
 /**
@@ -75,7 +78,7 @@ class Tensor : public Data,
      * @brief Construct a new Tensor object from an arithmetic parameter.
      *
      * @tparam T Type of the input parameter.
-     * @tparam VT Decayed type of the input paramter.
+     * @tparam VT Decayed type of the input parameter.
      * @param val Input value.
      */
     template<typename T,
@@ -211,14 +214,13 @@ class Tensor : public Data,
 
     /**
      * @brief Copy dimensions, datatype and data from another Tensor.
-     * If current Tensor already has an implementation, data is copied to the
-     * existing implementation. Tensor backend/device remain untouched.
-     * If current Tensor does not have an implementation, only a shallow copy
-     * is performed and the Tensor will share data with t.
+     * Tensor backend/device are also copied and only a shallow copy
+     * is performed for data. Implementation will be shared with original Tensor.
      * @param other other Tensor object.
      * @return Tensor&
      */
-    Tensor &operator=(const Tensor& other);
+    Tensor &operator=(const Tensor& other) = default;
+    Tensor &operator=(Tensor&& other) = default;
 
     template <typename T>
     constexpr Tensor &operator=(Vector<T> &&arr) {
@@ -272,9 +274,20 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator+(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor operator+(T val) const { return *this + Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend Tensor operator+(T val, const Tensor& other) { return other + val; }
+
+    Tensor& operator+=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor& operator+=(T val) {return *this += Tensor(val); }
 
     /**
-     * @brief Element-wise substraction operation for two ``Tensor``s.
+     * @brief Element-wise subtraction operation for two ``Tensor``s.
      * @note ``Tensor``s should be stored on the same backend.
      * @todo If input ``Tensor``s have a different dataType, the output should
      * have the dataType of the ``Tensor`` with the highest precision.
@@ -283,6 +296,17 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator-(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor operator-(T val) const { return *this - Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend inline Tensor operator-(T val, const Tensor& other) { return other - val; }
+
+    Tensor& operator-=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor& operator-=(T val) {return *this -= Tensor(val); }
 
     /**
      * @brief Element-wise multiplication operation for two ``Tensor``s.
@@ -294,6 +318,17 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator*(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor operator*(T val) const { return *this * Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend inline Tensor operator*(T val, const Tensor& other) { return other * val; }
+
+    Tensor& operator*=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor& operator*=(T val) {return *this *= Tensor(val); }
 
     /**
      * @brief Element-wise division operation for two ``Tensor``s.
@@ -305,6 +340,17 @@
      * @return Tensor
      */
     Tensor operator/(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor operator/(T val) const { return *this / Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend inline Tensor operator/(T val, const Tensor& other) { return Tensor(val) / other; }
+
+    Tensor& operator/=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor& operator/=(T val) {return *this /= Tensor(val); }
 
     /**
      * @brief Element-wise sqrt operation for Tensor.
@@ -331,14 +374,17 @@ public:
      * @brief Perform a deep copy of the tensor.
     */
     Tensor clone() const {
-        Tensor newTensor(*this);
-        if (!newTensor.isContiguous()) {
-            newTensor.makeContiguous();
-        }
-        else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
-            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
-            newTensor.setImpl(newImpl);
+        Tensor newTensor(*this); // shallow copy
+        // handle deepcopy of implementation if any
+        if (newTensor.hasImpl()) {
+            if (!newTensor.isContiguous()) {
+                newTensor.makeContiguous();
+            }
+            else {
+                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
+                newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
+                newTensor.setImpl(newImpl);
+            }
         }
         return newTensor;
     }
@@ -432,7 +478,7 @@ public:
     }
 
     /**
-     * @brief Return if an implementaiton has been associated.
+     * @brief Return if an implementation has been associated.
      * @return true
      * @return false
      */
@@ -486,20 +532,21 @@ public:
     /**
      * @brief Return the current capacity of the tensor, i.e. the actual memory
      * currently being allocated. It can be different from the size:
+     * - Capacity conservatively returns 0 if no implementation is provided.
      * - Capacity can be 0 if the tensor memory was not yet initialized (because
      *   of lazy initialization, memory is allocated only when it needs to be
      *   accessed the first time).
      * - Capacity can be > size if the tensor was downsized but memory was not
      *   reallocated.
     */
-    inline std::size_t capacity() const noexcept { return mImpl->capacity(); }
+    inline std::size_t capacity() const noexcept { return mImpl ? mImpl->capacity(): 0; }
 
 
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
-     * reshape), data is garanteed to remain valid.
-     * Otherwise, no garantee is provided regarding the validy of previous data
+     * reshape), data is guaranteed to remain valid.
+     * Otherwise, no guarantee is provided regarding the validity of previous data
      * (unlike std::vector). If the new overall size is larger than the previous
      * one, all previous data is invalided. Otherwise, previous data may or may
      * not remain valid, depending on the backend implementation.
@@ -514,8 +561,8 @@ public:
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
-     * reshape), data is garanteed to remain valid.
-     * Otherwise, no garantee is provided regarding the validy of previous data
+     * reshape), data is guaranteed to remain valid.
+     * Otherwise, no guarantee is provided regarding the validity of previous data
      * (unlike std::vector). If the new overall size is larger than the previous
      * one, all previous data is invalided. Otherwise, previous data may or may
      * not remain valid, depending on the backend implementation.
@@ -561,9 +608,9 @@ public:
 
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
-        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
-        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
-        AIDGE_ASSERT(idx < mSize, "idx out of range");
+        AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "Tensor::get<>({}): wrong data type, expected {}, got {}", idx, mDataType, NativeType<expectedType>::type);
+        AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "Tensor::get<>({}): can only be used for backends providing a valid host pointer.", idx);
+        AIDGE_ASSERT(idx < mSize, "Tensor::get<>({}): idx {} out of range, tensor size {}", idx, idx, mSize);
         return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
     }
 
@@ -620,20 +667,41 @@ public:
      * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor.
      * Beware: do not use this function with the storage index!
      *
-     * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor.
+     * @param index 1D contiguous index of the value considering a flatten, contiguous, tensor.
      * @return std::vector<DimSize_t>
      */
-    std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
-        std::vector<std::size_t> coordIdx(mDims.size());
-        std::size_t i = mDims.size();
+    static std::vector<std::size_t>
+    toCoord(const std::vector<Aidge::DimSize_t> &dimensions, std::size_t index);
+
 
-        while (i-- > 0) {
-            coordIdx[i] = (flatIdx % mDims[i]);
-            flatIdx/=mDims[i];
+    /**
+     * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor.
+     * Beware: do not use this function with the storage index!
+     *
+     * @param index 1D contiguous index of the value considering a flatten, contiguous, tensor.
+     * @return std::vector<DimSize_t>
+     */
+    std::vector<std::size_t> getCoord(std::size_t index) const {
+        if (isInBounds(mDims, index)) {
+            return toCoord(mDims, index);
+        } else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates.");
         }
-        return coordIdx;
     }
 
+    /**
+     * @brief From the coordinate returns the 1D contiguous index of an element in the tensor.
+     * If the number of coordinates is inferior to the number of dimensions,
+     * the remaining coordinates are assumed to be 0.
+     * Beware: the contiguous index will only correspond to the storage index
+     * if the tensor is contiguous!
+     * Note that coords may be an empty vector.
+     *
+     * @param coords Coordinate to an element in the tensor
+     * @return DimSize_t Contiguous index
+     */
+    static std::size_t toIndex(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords);
+
     /**
      * @brief From the coordinate returns the 1D contiguous index of an element in the tensor.
      * If the number of coordinates is inferior to the number of dimensions,
@@ -645,18 +713,27 @@ public:
      * @param coordIdx Coordinate to an element in the tensor
      * @return DimSize_t Contiguous index
      */
-    std::size_t getIdx(const std::vector<std::size_t>& coordIdx) const {
-        AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
-        std::size_t flatIdx = 0;
-        for(std::size_t i = 0; i < mDims.size(); ++i) {
-            auto coord = i < coordIdx.size() ? coordIdx[i]: 0;
-            AIDGE_ASSERT(coord < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
-            auto nextDimSize  = i + 1 < mDims.size() ? mDims[i + 1]: 1;
-            flatIdx = (flatIdx + coord) * nextDimSize;
+    std::size_t getIdx(const std::vector<std::size_t>& coords) const {
+        if (isInBounds<std::size_t>(mDims, coords)) {
+            return toIndex(mDims, coords);
+        } else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Out of bound coordinates.");
         }
-        return flatIdx;
     }
 
+    /**
+     * @brief check if index is in bound of given tensor dimensions
+     * @warning this function is templated in order to welcome cases like interpolation where indexes are not integers.
+     * However, the only types accepted are floating, integer & size_t
+     * @param dimensions : tensor dimensions
+     * @param coords : coordinates of the element whose flattened index is wanted
+     * @return true if all coords are in bound. False otherwise
+     */
+    template<typename T>
+    static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords);
+
+    static bool isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index);
+
     /**
      * @brief From the coordinate returns the 1D storage index of an element in the tensor.
      * If the number of coordinates is inferior to the number of dimensions,
@@ -683,7 +760,7 @@ public:
      * @note No memory copy is performed, the returned tensor does not own the memory.
      * @note If the number of coordinates matches the number of dimensions, a scalar
      * tensor is returned.
-     * @note If current tensor was contiguous, the returned tensor is garanteed to be
+     * @note If current tensor was contiguous, the returned tensor is guaranteed to be
      * contiguous as well.
      *
      * @param coordIdx Coordinates of the sub-tensor to extract
@@ -694,7 +771,7 @@ public:
     /**
      * @brief Returns a sub-tensor at some coordinate and with some dimension.
      *
-     * @note Data contiguity of the returned Tensor is not guaranted.
+     * @note Data contiguity of the returned Tensor is not guaranteed.
      *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
@@ -761,7 +838,7 @@ public:
     }
 
     /**
-     * Return a reference to a Tensor that is garanteed to be contiguous:
+     * Return a reference to a Tensor that is guaranteed to be contiguous:
      * - itself, if already contiguous;
      * - the provided Tensor, overwritten with the copied data.
      * The data type, backend and device stay the same.
@@ -893,4 +970,17 @@ private:
 };
 }  // namespace Aidge
 
+template<>
+struct fmt::formatter<Aidge::Tensor> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::Tensor const& t, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}", t.toString());
+    }
+};
+
 #endif /* AIDGE_CORE_DATA_TENSOR_H_ */
diff --git a/include/aidge/data/half.hpp b/include/aidge/data/half.hpp
index 89df93cf3d10087833b3ad00dfbe3afd4e94c725..1464ac1e092e43059048825bf98d1186314b902c 100644
--- a/include/aidge/data/half.hpp
+++ b/include/aidge/data/half.hpp
@@ -213,11 +213,11 @@
 	#define HALF_ROUND_STYLE	-1			// = std::round_indeterminate
 #endif
 
-/// Tie-breaking behaviour for round to nearest.
+/// Tie-breaking behavior for round to nearest.
 /// This specifies if ties in round to nearest should be resolved by rounding to the nearest even value. By default this is
-/// defined to `0` resulting in the faster but slightly more biased behaviour of rounding away from zero in half-way cases (and
+/// defined to `0` resulting in the faster but slightly more biased behavior of rounding away from zero in half-way cases (and
 /// thus equal to the round() function), but can be redefined to `1` (before including half.hpp) if more IEEE-conformant
-/// behaviour is needed.
+/// behavior is needed.
 #ifndef HALF_ROUND_TIES_TO_EVEN
 	#define HALF_ROUND_TIES_TO_EVEN	0		// ties away from zero
 #endif
@@ -950,7 +950,7 @@ namespace half_float
 		/// Convert half-precision floating point to integer.
 		/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
 		/// \tparam E `true` for round to even, `false` for round away from zero
-		/// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits)
+		/// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits)
 		/// \param value binary representation of half-precision value
 		/// \return integral value
 		template<std::float_round_style R,bool E,typename T> T half2int_impl(uint16 value)
@@ -988,13 +988,13 @@ namespace half_float
 
 		/// Convert half-precision floating point to integer.
 		/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
-		/// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits)
+		/// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits)
 		/// \param value binary representation of half-precision value
 		/// \return integral value
 		template<std::float_round_style R,typename T> T half2int(uint16 value) { return half2int_impl<R,HALF_ROUND_TIES_TO_EVEN,T>(value); }
 
 		/// Convert half-precision floating point to integer using round-to-nearest-away-from-zero.
-		/// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits)
+		/// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits)
 		/// \param value binary representation of half-precision value
 		/// \return integral value
 		template<typename T> T half2int_up(uint16 value) { return half2int_impl<std::round_to_nearest,0,T>(value); }
@@ -1053,16 +1053,16 @@ namespace half_float
 
 	/// Half-precision floating point type.
 	/// This class implements an IEEE-conformant half-precision floating point type with the usual arithmetic operators and
-	/// conversions. It is implicitly convertible to single-precision floating point, which makes artihmetic expressions and
+	/// conversions. It is implicitly convertible to single-precision floating point, which makes arithmetic expressions and
 	/// functions with mixed-type operands to be of the most precise operand type. Additionally all arithmetic operations
 	/// (and many mathematical functions) are carried out in single-precision internally. All conversions from single- to
 	/// half-precision are done using the library's default rounding mode, but temporary results inside chained arithmetic
 	/// expressions are kept in single-precision as long as possible (while of course still maintaining a strong half-precision type).
 	///
 	/// According to the C++98/03 definition, the half type is not a POD type. But according to C++11's less strict and
-	/// extended definitions it is both a standard layout type and a trivially copyable type (even if not a POD type), which
+	/// extended definitions it is both a standard layout type and a trivially copyable type (even if not a POD type), which
 	/// means it can be standard-conformantly copied using raw binary copies. But in this context some more words about the
-	/// actual size of the type. Although the half is representing an IEEE 16-bit type, it does not neccessarily have to be of
+	/// actual size of the type. Although the half is representing an IEEE 16-bit type, it does not necessarily have to be of
 	/// exactly 16-bits size. But on any reasonable implementation the actual binary representation of this type will most
 	/// probably not ivolve any additional "magic" or padding beyond the simple binary representation of the underlying 16-bit
 	/// IEEE number, even if not strictly guaranteed by the standard. But even then it only has an actual size of 16 bits if
@@ -2155,25 +2155,25 @@ namespace half_float
 		/// \name Arithmetic operators
 		/// \{
 
-		/// Add halfs.
+		/// Add halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return sum of half expressions
 		template<typename T,typename U> typename enable<expr,T,U>::type operator+(T x, U y) { return functions::plus(x, y); }
 
-		/// Subtract halfs.
+		/// Subtract halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return difference of half expressions
 		template<typename T,typename U> typename enable<expr,T,U>::type operator-(T x, U y) { return functions::minus(x, y); }
 
-		/// Multiply halfs.
+		/// Multiply halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return product of half expressions
 		template<typename T,typename U> typename enable<expr,T,U>::type operator*(T x, U y) { return functions::multiplies(x, y); }
 
-		/// Divide halfs.
+		/// Divide halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return quotient of half expressions
@@ -2181,7 +2181,7 @@ namespace half_float
 
 		/// Identity.
 		/// \param arg operand
-		/// \return uncahnged operand
+		/// \return unchanged operand
 		template<typename T> HALF_CONSTEXPR typename enable<T,T>::type operator+(T arg) { return arg; }
 
 		/// Negation.
@@ -2330,28 +2330,28 @@ namespace half_float
 		inline expr exp2(half arg) { return functions::exp2(arg); }
 		inline expr exp2(expr arg) { return functions::exp2(arg); }
 
-		/// Natural logorithm.
+		/// Natural logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg to base e
 //		template<typename T> typename enable<expr,T>::type log(T arg) { return functions::log(arg); }
 		inline expr log(half arg) { return functions::log(arg); }
 		inline expr log(expr arg) { return functions::log(arg); }
 
-		/// Common logorithm.
+		/// Common logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg to base 10
 //		template<typename T> typename enable<expr,T>::type log10(T arg) { return functions::log10(arg); }
 		inline expr log10(half arg) { return functions::log10(arg); }
 		inline expr log10(expr arg) { return functions::log10(arg); }
 
-		/// Natural logorithm.
+		/// Natural logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg plus 1 to base e
 //		template<typename T> typename enable<expr,T>::type log1p(T arg) { return functions::log1p(arg); }
 		inline expr log1p(half arg) { return functions::log1p(arg); }
 		inline expr log1p(expr arg) { return functions::log1p(arg); }
 
-		/// Binary logorithm.
+		/// Binary logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg to base 2
 //		template<typename T> typename enable<expr,T>::type log2(T arg) { return functions::log2(arg); }
@@ -2620,7 +2620,7 @@ namespace half_float
 		/// Multiply by power of two.
 		/// \param arg number to modify
 		/// \param exp power of two to multiply with
-		/// \return \a arg multplied by 2 raised to \a exp
+		/// \return \a arg multiplied by 2 raised to \a exp
 //		template<typename T> typename enable<half,T>::type ldexp(T arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half ldexp(half arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half ldexp(expr arg, int exp) { return functions::scalbln(arg, exp); }
@@ -2636,7 +2636,7 @@ namespace half_float
 		/// Multiply by power of two.
 		/// \param arg number to modify
 		/// \param exp power of two to multiply with
-		/// \return \a arg multplied by 2 raised to \a exp
+		/// \return \a arg multiplied by 2 raised to \a exp
 //		template<typename T> typename enable<half,T>::type scalbn(T arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half scalbn(half arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half scalbn(expr arg, int exp) { return functions::scalbln(arg, exp); }
@@ -2644,7 +2644,7 @@ namespace half_float
 		/// Multiply by power of two.
 		/// \param arg number to modify
 		/// \param exp power of two to multiply with
-		/// \return \a arg multplied by 2 raised to \a exp
+		/// \return \a arg multiplied by 2 raised to \a exp
 //		template<typename T> typename enable<half,T>::type scalbln(T arg, long exp) { return functions::scalbln(arg, exp); }
 		inline half scalbln(half arg, long exp) { return functions::scalbln(arg, exp); }
 		inline half scalbln(expr arg, long exp) { return functions::scalbln(arg, exp); }
@@ -2798,7 +2798,7 @@ namespace half_float
 		inline bool islessequal(expr x, half y) { return functions::islessequal(x, y); }
 		inline bool islessequal(expr x, expr y) { return functions::islessequal(x, y); }
 
-		/// Comarison for less or greater.
+		/// Comparison for less or greater.
 		/// \param x first operand
 		/// \param y second operand
 		/// \retval true if either less or greater
@@ -3027,7 +3027,7 @@ namespace std
 		/// Quiet NaN.
 		static HALF_CONSTEXPR half_float::half quiet_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7FFF); }
 
-		/// Signalling NaN.
+		/// Signaling NaN.
 		static HALF_CONSTEXPR half_float::half signaling_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7DFF); }
 
 		/// Smallest positive subnormal value.
diff --git a/include/aidge/data/half_fmt.hpp b/include/aidge/data/half_fmt.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5e2072038c10fffd8db5f7fe93381f002f2119b1
--- /dev/null
+++ b/include/aidge/data/half_fmt.hpp
@@ -0,0 +1,18 @@
+#include "aidge/data/half.hpp"
+#include <fmt/core.h>
+
+// Specialize fmt::formatter for half_float::half
+template <>
+struct fmt::formatter<half_float::half> : fmt::formatter<float> {
+    // Parses the format specifications and stores them in the base formatter
+    template <typename ParseContext>
+    constexpr auto parse(ParseContext& ctx) {
+        return fmt::formatter<float>::parse(ctx);
+    }
+
+    // Formats the half type by first converting it to float
+    template <typename FormatContext>
+    auto format(const half_float::half& value, FormatContext& ctx) const {
+        return fmt::formatter<float>::format(static_cast<float>(value), ctx);
+    }
+};
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index efdb06c4ac6d0e6898d899cc639a88d1da301000..76f5dcdfc28e90a3f83435841af21048bcb2a9c0 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -91,7 +91,7 @@ public:
 
     /**
      * @brief Set the node name.
-     * @warning Undefined behaviour when several Nodes have the same name.
+     * @warning Undefined behavior when several Nodes have the same name.
      * @param name New name for the node.
      */
     inline void setName(const std::string &name) { mName = name; }
@@ -184,7 +184,7 @@ public:
     /**
      * @brief List outside data input connections of the GraphView.
      * Data inputs exclude inputs expecting parameters (weights or bias).
-     * The vector size is garanteed to match the number of outside data inputs of the GraphView. If there is
+     * The vector size is guaranteed to match the number of outside data inputs of the GraphView. If there is
      * no external connection to a given input, a pair of nullptr and gk_IODefaultIndex is returned.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
@@ -214,7 +214,7 @@ public:
 
     /**
      * @brief List outside output connections of the GraphView. The vector
-     * size is garanteed to match the number of outputs of the GraphView. If there is
+     * size is guaranteed to match the number of outputs of the GraphView. If there is
      * no connection to a given output, the corresponding sub-vector will be empty.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
@@ -319,12 +319,12 @@ public:
      * - The childs and parents of the next node in the ranked list are then
      *   added to the list, and so on.
      * - Any remaining nodes have no path to the root node and are added in
-     *   arbitrary order. In this case, the ranking is not garanteed to be unique.
+     *   arbitrary order. In this case, the ranking is not guaranteed to be unique.
      *
-     * If the ranking cannot be garanteed to be unique, the second item indicates
-     * the rank from which unicity cannot be garanteed.
+     * If the ranking cannot be guaranteed to be unique, the second item indicates
+     * the rank from which unicity cannot be guaranteed.
      * @return std::pair<std::vector<NodePtr>, size_t> Pair with the list of ranked
-     * nodes and the size of the ranked sub-list where unicity is garanteed.
+     * nodes and the size of the ranked sub-list where unicity is guaranteed.
     */
     std::pair<std::vector<NodePtr>, size_t> getRankedNodes() const;
 
@@ -394,7 +394,7 @@ public:
      * @param fromOutNode Pointer to the already included Node the new Node will
      * be linked to (it will become a parent of the new Node). If the GraphView
      * only has one output Node, then default to this Node.
-     * @param fromTensor Ouput Tensor ID of the already included Node. Default to
+     * @param fromTensor Output Tensor ID of the already included Node. Default to
      * 0.
      * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
      * first available data input for the Node.
@@ -412,7 +412,7 @@ public:
      * be linked to (it will become a parent of the new Node). As a name is
      * optional, ensure such Node is in the GraphView or it will send back an
      * error message.
-     * @param fromTensor Ouput Tensor ID of the already included Node. Default to
+     * @param fromTensor Output Tensor ID of the already included Node. Default to
      * 0.
      * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
      * first available data input for the Node.
@@ -424,16 +424,19 @@ public:
         addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
     }
 
-    inline void updateNodeName(NodePtr nodeToRename, const std::string& newName){
-        const std::string& oldName = nodeToRename->name();
-        AIDGE_ASSERT(mNodeRegistry.find(newName) != mNodeRegistry.end(), "Name {} is already used in graph {}.", newName, name());
+    inline void updateNodeName(const std::shared_ptr<Node>& node, const std::string& newName){
+        if (!newName.empty()) {
+            auto itNew = mNodeRegistry.insert(std::make_pair(newName, node));
+            if (!itNew.second) {
+                Log::notice("Replacing existing node name in graph node name registry: {}", newName);
+                (itNew.first)->second = node;
+            }
+        }
 
-        if (nodeToRename->name() != ""){ // Case node already had a name
-            AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", oldName, name());
-            mNodeRegistry[newName] = mNodeRegistry[oldName];
-            mNodeRegistry.erase(oldName);
-        }else{ // Case node did not had a name
-            mNodeRegistry[newName] = nodeToRename;
+        if (!node->name().empty() && node->name() != newName) {
+            const auto it = mNodeRegistry.find(node->name());
+            AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", node->name(), name());
+            mNodeRegistry.erase(it);
         }
     }
 
@@ -486,7 +489,7 @@ public:
      * Both sets should include all the necessary Producers.
      * @details There are 3 cases of replacement:
      * Case 1: same number of input/output connections for oldNodes and newNodes sets.
-     *     - input/output connections are replacated according to their IDs.
+     *     - input/output connections are replicated according to their IDs.
      * Case 2: different number of input/output connections for oldNodes and newNodes sets.
      *     - only a single parent/child node for the newNodes set, every input/output is
      *       connected to it.
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index 951aa6b29d73d9055cf9f13c8ddc6313cb506879..3b0874580b112f4c219886a78677e6c9801b72b8 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -53,7 +53,7 @@ public:
     struct MatchingResult {
         // Mutable is required to allow modifying MatchingResult members with a std::set
         // iterator. Any change should not modify the set ordering.
-        // We use graph->rootNode() as the std::set key, which is garanteed
+        // We use graph->rootNode() as the std::set key, which is guaranteed
         // to never change after insertion!
         mutable std::shared_ptr<GraphView> graph;
         mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
@@ -134,7 +134,7 @@ public:
      * QUERY = SEQ | NODE_OR_BLOCK (';' (SEQ | NODE_OR_BLOCK))*
      *
      * @param query The query to search.
-     * @param disjoint If true, only keep the longuest disjoint (non-overlapping) matches.
+     * @param disjoint If true, only keep the longest disjoint (non-overlapping) matches.
      * @return std::set<MatchingResult> Set of matches, each stored in a MatchingResult struct.
     */
     std::set<MatchingResult> match(const std::string& query, bool disjoint = false);
@@ -150,17 +150,17 @@ public:
     MatchingResult matchFrom(NodePtr startNode, const std::string& query);
 
     /**
-     * Filter to keep only the longuest disjoint (non-overlapping) matches.
+     * Filter to keep only the longest disjoint (non-overlapping) matches.
     */
     std::set<MatchingResult> filterLonguestDisjoint(const std::set<MatchingResult>& matches);
 
-    inline void addNodeLambda(const std::string& name, bool(func)(const NodePtr&)) {
+    inline void addNodeLambda(const std::string& name, std::function<bool(const NodePtr&)> func) {
         mLambda[name] = func;
     }
 
 private:
     std::shared_ptr<GraphView> mGraph;
-    std::map<std::string, bool(*)(const NodePtr&)> mLambda;
+    std::map<std::string, std::function<bool(const NodePtr&)>> mLambda;
 
     /**
      * QUANTIFIER = '?' | '*' | '+' | ('{' [0-9]+ '}')
@@ -216,7 +216,7 @@ private:
         bool operator()(const MatchingResult& lhs, const MatchingResult& rhs) const {
             // Some matches size could be the same
             if (lhs.graph->getNodes().size() == rhs.graph->getNodes().size()) {
-                // In this case, use rootNode which is garanteed to be different!
+                // In this case, use rootNode which is guaranteed to be different!
                 return lhs.graph->rootNode() < rhs.graph->rootNode();
             }
 
@@ -226,7 +226,7 @@ private:
 };
 
 inline bool operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, const Aidge::SinglePassGraphMatching::MatchingResult& rhs) {
-    // Matches rootNode are garanteed to be different!
+    // Matches rootNode are guaranteed to be different!
     return lhs.graph->rootNode() < rhs.graph->rootNode();
 }
 }  // namespace Aidge
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 32932fa6f598737644f74d4e2ce5da89557b5d3d..a16bbd63ecf52e8c97d5032c5c90a5f69186f995 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -54,7 +54,7 @@ private:
           return sharedA < sharedB; // shared_ptr has a valid comparison operator
       }
   };
-  std::string mName; /** Name of the Node. Should be unique. */
+  std::shared_ptr<DynamicAttributes> mAttrs;
 
   std::set<std::weak_ptr<GraphView>, weakCompare> mViews; /** Set of pointers to GraphView instances including this Node instance. */
   const std::shared_ptr<Operator> mOperator; // Pointer to the associated Operator
@@ -70,6 +70,14 @@ private:
 public:
   Node() = delete;
 
+  /**
+   * @brief Construct a new Node object associated with the input Operator.
+   * @param op Operator giving the Node its number of connections.
+   * @param attrs Attributes for the Node.
+   */
+  Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs);
+//   Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs);
+
   /**
    * @brief Construct a new Node object associated with the input Operator.
    * @param op Operator giving the Node its number of connections.
@@ -116,15 +124,18 @@ public:
   //        INNER
   ///////////////////////////////////////////////////////
 
+  inline std::shared_ptr<DynamicAttributes> attributes() const {
+    return mAttrs;
+  }
   /**
    * @brief Name of the Node.
    * @return std::string
    */
-  inline std::string name() const noexcept { return mName; }
+  inline std::string name() const noexcept { return mAttrs->getAttr<std::string>("name"); }
 
   /**
    * @brief Set the Node name.
-   * @warning Undefined behaviour when several Nodes have the same name.
+   * @warning Undefined behavior when several Nodes have the same name.
    * @param name New name for the node.
    */
   void setName(const std::string &name);
@@ -133,7 +144,7 @@ public:
    * @brief Given the parameter name generate a new name which is unique
    * in all the GraphView which contains this node.
    * To generate the new name the method is called recursively and append
-   * the caracter ``_``.
+   * the character ``_``.
    * If no duplicate return name, this is the exit condition.
    * @param name Base name to make unique.
    * @return A unique name in all the GraphView which contains this one.
@@ -164,7 +175,8 @@ public:
    * @brief Get the Operator object of the Node.
    * @return std::shared_ptr<Operator>
    */
-  inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
+  inline std::shared_ptr<Operator> getOperator() const { return (*mOperator)(mAttrs); }
+//   inline std::shared_ptr<Operator> getOperator() const { return mOperator; }
 
   ///////////////////////////////////////////////////////
   //        TENSOR MANAGEMENT
@@ -179,7 +191,7 @@ public:
   bool valid() const;
 
   /**
-   * @brief List of pair <Parent, ID of the data intput>. When an input is not
+   * @brief List of pair <Parent, ID of the data input>. When an input is not
    * linked to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
    * Data inputs exclude inputs expecting parameters (weights or bias).
    * @return std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>
@@ -228,7 +240,7 @@ public:
 
   /**
    * @brief List input ids of children linked to outputs of the node. The vector
-   * size is garanteed to match the number of outputs of the node. If there is
+   * size is guaranteed to match the number of outputs of the node. If there is
    * no connection to a given output, the corresponding sub-vector will be empty.
    * @return std::vector<std::vector<std::pair<std::shared_ptr<Node>,
    * IOIndex_t>>>
@@ -321,7 +333,7 @@ public:
    * @param outId ID of the current Node output to connect to the other Node.
    * Default to 0.
    * @param otherInId ID of the other Node input to connect to the current Node.
-   * Default to the first avaible data input.
+   * Default to the first available data input.
    */
   void addChild(NodePtr otherNode,
                 const IOIndex_t outId = IOIndex_t(0),
@@ -398,7 +410,7 @@ public:
   bool removeChild(const NodePtr nodePtr, const IOIndex_t outId = 0);
 
   /**
-   * @brief Remove every link of surrounding nodes to it and conversly
+   * @brief Remove every link of surrounding nodes to it and conversely
    */
   void resetConnections(bool includeLearnableParam = false);
 
@@ -534,7 +546,7 @@ private:
    */
   void addParent(const NodePtr otherNode, const IOIndex_t inId);
 
-  // OPERATOR FUNCTIONNAL but commented out to avoid iostream inclusion
+  // OPERATOR FUNCTIONAL but commented out to avoid iostream inclusion
   // /**
   //  * @brief operator<< overload to ease print & debug of nodes
   //  * @param[inout] ostream to print to
diff --git a/include/aidge/graph/StaticAnalysis.hpp b/include/aidge/graph/StaticAnalysis.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..d3fe681749eeb69e4816a38f302d510f1c81381a
--- /dev/null
+++ b/include/aidge/graph/StaticAnalysis.hpp
@@ -0,0 +1,571 @@
+
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_GRAPH_STATICANALYSIS_H_
+#define AIDGE_CORE_GRAPH_STATICANALYSIS_H_
+
+#include <memory>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/ReduceSum.hpp"
+#include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+
+namespace Aidge {
+/**
+ * @brief Base class to compute statistics from an Operator.
+ * 
+ */
+class OperatorStats : public Registrable<OperatorStats, std::string, std::function<std::shared_ptr<OperatorStats>(const Operator&)>> {
+public:
+    OperatorStats(const Operator& op);
+    const Operator& getOperator() const noexcept { return mOp; }
+
+    /**
+     * @brief Get the worst case total number of arithmetic operations for the 
+     * operator data flow. This includes base arithmetic operations: +, -, / and *.
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * Example of Operator with only arithmetic operations: Conv.
+     * 
+     * @return size_t Number of arithmetic operations.
+     */
+    virtual size_t getNbArithmOps() const { return 2 * getNbMACOps(); };
+
+    /**
+     * @brief Get the worst case total number of logic operations for the 
+     * operator data flow. This includes operations like logical shift, or, and...
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * Example of Operator with only logic operations: BitShift.
+     * 
+     * @return size_t Number of logic operations.
+     */
+    virtual size_t getNbLogicOps() const { return 0; };
+
+    /**
+     * @brief Get the worst case total number of comparison operations for the 
+     * operator data flow. This includes operations like <, >, =...
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * Example of Operator with only comparison operations: MaxPool.
+     * 
+     * @return size_t Number of comparison operations.
+     */
+    virtual size_t getNbCompOps() const { return 0; };
+
+    /**
+     * @brief Get the worst case total number of non-linear (NL) operations for the
+     * operator data flow. This includes operations like calls to tanh(), erf(), cos()...
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * Example of Operator with only NL operations: Tanh.
+     * Non-linear operations are necessarily of floating-point type.
+     * 
+     * @return size_t Number of non-linear (NL) operations.
+     */
+    virtual size_t getNbNLOps() const { return 0; };
+
+    /**
+     * @brief Get the worst case total number of operations for the operator data flow.
+     * Total number of operations = arithmetic ops + logic ops + comp ops + NL ops.
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * 
+     * @return size_t Number of operations.
+     */
+    size_t getNbOps() const { return getNbArithmOps() + getNbLogicOps() + getNbCompOps() + getNbNLOps(); };
+
+    /**
+     * @brief Get the worst case total number of INT arithmetic operations for
+     * the operator data flow.
+     * Such that getNbArithmOps() = getNbArithmIntOps() + getNbArithmFpOps()
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * 
+     * @return size_t Number of INT arithmetic operations.
+     */
+    virtual size_t getNbArithmIntOps() const;
+
+    /**
+     * @brief Get the worst case total number of FP arithmetic operations for 
+     * the operator data flow.
+     * Such that getNbArithmOps() = getNbArithmIntOps() + getNbArithmFpOps()
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * 
+     * @return size_t Number of FP arithmetic operations.
+     */
+    size_t getNbArithmFpOps() const { return getNbArithmOps() - getNbArithmIntOps(); };
+
+    /**
+     * @brief Get the worst case total number of MAC operations for the operator
+     * data flow. MAC operations are included in getNbArithmOps(), with 1 MAC 
+     * operation counted as 2 arithmetic operations. MAC can be INT or FP.
+     * Control flow operations (loop counters, index computation...) and memory 
+     * accesses are not included.
+     * A naive implementation is considered (more operations might be required 
+     * for numerical stability in an actual implementation).
+     * 
+     * @return size_t Number of MAC operations.
+     */
+    virtual size_t getNbMACOps() const { return 0; };
+    virtual ~OperatorStats() = default;
+
+protected:
+    const Operator &mOp;
+};
+
+/**
+ * @brief Base class to compute statistics from a GraphView
+ * 
+ */
+class StaticAnalysis : public std::enable_shared_from_this<StaticAnalysis> {
+public:
+    StaticAnalysis(std::shared_ptr<GraphView> graph);
+    const std::shared_ptr<GraphView> getGraph() const noexcept { return mGraph; }
+
+    /**
+     * @brief Get the Operator Stats object corresponding to the given node.
+     * 
+     * @param node Node
+     * @return std::shared_ptr<OperatorStats> Node's Operator stats
+     */
+    std::shared_ptr<OperatorStats> getOpStats(std::shared_ptr<Node> node) const;
+
+    /**
+     * @brief Get the number of parameters associated to a node. This includes
+     * all Producers directly connected to the node's inputs as well as all
+     * internal Producers (in case of a meta operator).
+     * 
+     * Note: this function does not check if parameters are shared between
+     * several nodes or not. This means that simply adding parameters count from
+     * several nodes may lead to a higher number of parameters than in reality
+     * if some of them are shared.
+     * 
+     * @param node Node
+     * @return size_t Number of parameters
+     */
+    virtual size_t getNbParams(std::shared_ptr<Node> node) const;
+
+    /**
+     * @brief Get the total parameters memory size, in bits, associated to a node.
+     * This includes all Producers directly connected to the node's inputs as 
+     * well as all internal Producers (in case of a meta operator).
+     * 
+     * Note: this function does not check if parameters are shared between
+     * several nodes or not. This means that simply adding parameters size from
+     * several nodes may lead to a higher parameter size than in reality
+     * if some of them are shared.
+     * 
+     * @param node Node
+     * @return size_t Total parameters memory, in bits
+     */
+    virtual size_t getParamsSize(std::shared_ptr<Node> node) const;
+
+    size_t getNbArithmOps() const { return accumulate(&OperatorStats::getNbArithmOps); }
+    size_t getNbLogicOps() const { return accumulate(&OperatorStats::getNbLogicOps); }
+    size_t getNbCompOps() const { return accumulate(&OperatorStats::getNbCompOps); }
+    size_t getNbNLOps() const { return accumulate(&OperatorStats::getNbNLOps); }
+    size_t getNbOps() const { return accumulate(&OperatorStats::getNbOps); }
+    size_t getNbArithmIntOps() const { return accumulate(&OperatorStats::getNbArithmIntOps); }
+    size_t getNbArithmFpOps() const { return accumulate(&OperatorStats::getNbArithmFpOps); }
+    size_t getNbMACOps() const { return accumulate(&OperatorStats::getNbMACOps); }
+    virtual void summary(bool incProducers = false) const;
+    virtual ~StaticAnalysis() = default;
+
+protected:
+    const std::shared_ptr<GraphView> mGraph;
+
+    size_t accumulate(size_t (OperatorStats::*func)() const) const;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+class MetaOpStats : public OperatorStats {
+public:
+    MetaOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<MetaOpStats> create(const Operator& op) {
+        return std::make_unique<MetaOpStats>(op);
+    }
+
+    size_t getNbArithmOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmOps(); }
+    size_t getNbLogicOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbLogicOps(); }
+    size_t getNbCompOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbCompOps(); }
+    size_t getNbNLOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbNLOps(); }
+    size_t getNbArithmIntOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbArithmIntOps(); }
+    size_t getNbMACOps() const override { return StaticAnalysis(dynamic_cast<const MetaOperator_Op&>(mOp).getMicroGraph()).getNbMACOps(); }
+};
+
+template <class OP>
+class ConvStats : public OperatorStats {
+public:
+    ConvStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ConvStats<OP>> create(const Operator& op) {
+        return std::make_unique<ConvStats<OP>>(op);
+    }
+
+    size_t getNbMACOps() const override {
+        const OP& op_ = dynamic_cast<const OP&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+	    const std::size_t weightsSize = op_.getInput(1)->size();
+        const std::size_t outputSize
+            = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
+                              op_.getOutput(0)->dims().cend(),
+                              1,
+                              std::multiplies<size_t>()); // NCHW...
+        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+        return batchSize * (weightsSize * outputSize);
+    }
+};
+
+// Beware: cannot use Conv_Op<2>::Type as key because static variable initialization order is undefined!
+REGISTRAR(OperatorStats, "Conv1D", ConvStats<Conv_Op<1>>::create);
+REGISTRAR(OperatorStats, "ConvDepthWise1D", ConvStats<ConvDepthWise_Op<1>>::create);
+REGISTRAR(OperatorStats, "Conv2D", ConvStats<Conv_Op<2>>::create);
+REGISTRAR(OperatorStats, "ConvDepthWise2D", ConvStats<ConvDepthWise_Op<2>>::create);
+
+template <class OP>
+class MaxPoolingStats : public OperatorStats {
+public:
+    MaxPoolingStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<MaxPoolingStats<OP>> create(const Operator& op) {
+        return std::make_unique<MaxPoolingStats<OP>>(op);
+    }
+
+    size_t getNbCompOps() const override {
+        const OP& op_ = dynamic_cast<const OP&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+	    const std::size_t poolSize
+            = std::accumulate(op_.kernelDims().cbegin(),
+                              op_.kernelDims().cend(),
+                              1,
+                              std::multiplies<size_t>());
+        const std::size_t outputSize
+            = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
+                              op_.getOutput(0)->dims().cend(),
+                              1,
+                              std::multiplies<size_t>()); // NCHW...
+        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+        return batchSize * ((poolSize - 1) * outputSize);
+    }
+};
+
+REGISTRAR(OperatorStats, "MaxPooling1D", MaxPoolingStats<MaxPooling_Op<1>>::create);
+REGISTRAR(OperatorStats, "MaxPooling2D", MaxPoolingStats<MaxPooling_Op<2>>::create);
+REGISTRAR(OperatorStats, "MaxPooling3D", MaxPoolingStats<MaxPooling_Op<3>>::create);
+
+template <class OP>
+class AvgPoolingStats : public OperatorStats {
+public:
+    AvgPoolingStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<AvgPoolingStats<OP>> create(const Operator& op) {
+        return std::make_unique<AvgPoolingStats<OP>>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const OP& op_ = dynamic_cast<const OP&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+	    const std::size_t poolSize
+            = std::accumulate(op_.kernelDims().cbegin(),
+                              op_.kernelDims().cend(),
+                              1,
+                              std::multiplies<size_t>());
+        const std::size_t outputSize
+            = std::accumulate(op_.getOutput(0)->dims().cbegin() + 2,
+                              op_.getOutput(0)->dims().cend(),
+                              1,
+                              std::multiplies<size_t>()); // NCHW...
+        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+        // (poolSize - 1) additions + 1 division for each output
+        return batchSize * (poolSize * outputSize);
+    }
+};
+
+REGISTRAR(OperatorStats, "AvgPooling1D", AvgPoolingStats<AvgPooling_Op<1>>::create);
+REGISTRAR(OperatorStats, "AvgPooling2D", AvgPoolingStats<AvgPooling_Op<2>>::create);
+REGISTRAR(OperatorStats, "AvgPooling3D", AvgPoolingStats<AvgPooling_Op<3>>::create);
+REGISTRAR(OperatorStats, "AvgPooling4D", AvgPoolingStats<AvgPooling_Op<4>>::create);
+
+class FCStats : public OperatorStats {
+public:
+    FCStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<FCStats> create(const Operator& op) {
+        return std::make_unique<FCStats>(op);
+    }
+
+    size_t getNbMACOps() const override {
+        const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+	    const std::size_t weightsSize = op_.getInput(1)->size();
+        const std::size_t batchSize = op_.getInput(0)->dims()[0]; // NCHW
+        return batchSize * weightsSize;
+    }
+};
+
+REGISTRAR(OperatorStats, "FC", FCStats::create);
+
+class MatMulStats : public OperatorStats {
+public:
+    MatMulStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<MatMulStats> create(const Operator& op) {
+        return std::make_unique<MatMulStats>(op);
+    }
+
+    size_t getNbMACOps() const override {
+        const MatMul_Op& op_ = dynamic_cast<const MatMul_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t n = (op_.getInput(0)->dims().size() > 1)
+            ? op_.getInput(0)->dims().end()[-2] : 1;
+        const size_t k = op_.getInput(0)->dims().back();
+        const size_t m = (op_.getInput(1)->dims().size() > 1)
+            ? op_.getInput(1)->dims().back() : 1;
+        const size_t nb = (op_.getInput(0)->dims().size() > 2)
+            ? std::accumulate(op_.getInput(0)->dims().cbegin(),
+                              op_.getInput(0)->dims().cend() - 2,
+                              1,
+                              std::multiplies<size_t>())
+            : 1; 
+
+        return nb * n * m * k;
+    }
+};
+
+REGISTRAR(OperatorStats, "MatMul", MatMulStats::create);
+
+class ReLUStats : public OperatorStats {
+public:
+    ReLUStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ReLUStats> create(const Operator& op) {
+        return std::make_unique<ReLUStats>(op);
+    }
+
+    size_t getNbCompOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "ReLU", ReLUStats::create);
+
+class AbsStats : public OperatorStats {
+public:
+    AbsStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<AbsStats> create(const Operator& op) {
+        return std::make_unique<AbsStats>(op);
+    }
+
+    size_t getNbCompOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+
+    // This is in the worst case (all values are negative)
+    size_t getNbArithmOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "Abs", AbsStats::create);
+
+class ReduceMeanStats : public OperatorStats {
+public:
+    ReduceMeanStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ReduceMeanStats> create(const Operator& op) {
+        return std::make_unique<ReduceMeanStats>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const ReduceMean_Op& op_ = dynamic_cast<const ReduceMean_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t nbIn = op_.getInput(0)->size();
+        const size_t nbOut = op_.getOutput(0)->size();
+        const size_t nbReduce = nbIn / nbOut;
+        // (nbReduce - 1) additions + 1 division for each output
+        return nbOut * nbReduce;
+    }
+};
+
+REGISTRAR(OperatorStats, "ReduceMean", ReduceMeanStats::create);
+
+class ReduceSumStats : public OperatorStats {
+public:
+    ReduceSumStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ReduceSumStats> create(const Operator& op) {
+        return std::make_unique<ReduceSumStats>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const ReduceSum_Op& op_ = dynamic_cast<const ReduceSum_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t nbIn = op_.getInput(0)->size();
+        const size_t nbOut = op_.getOutput(0)->size();
+        const size_t nbReduce = nbIn / nbOut;
+        // (nbReduce - 1) additions for each output
+        return nbOut * (nbReduce - 1);
+    }
+};
+
+REGISTRAR(OperatorStats, "ReduceSum", ReduceSumStats::create);
+
+class SoftmaxStats : public OperatorStats {
+public:
+    SoftmaxStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<SoftmaxStats> create(const Operator& op) {
+        return std::make_unique<SoftmaxStats>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+        const size_t nbReduce = op_.getInput(0)->dims()[axis];
+        const size_t nbOut = op_.getOutput(0)->size();
+        // nbOut divisions + (nbReduce - 1) additions
+        return nbOut + (nbReduce - 1);
+    }
+
+    size_t getNbNLOps() const override {
+        const Softmax_Op& op_ = dynamic_cast<const Softmax_Op&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        const size_t axis = (op_.axis() >= 0) ? op_.axis() : op_.getInput(0)->nbDims() + op_.axis();
+        const size_t nbReduce = op_.getInput(0)->dims()[axis];
+        const size_t nbOut = op_.getOutput(0)->size();
+        // nbOut exp + nbReduce exp
+        return nbOut + nbReduce;
+    }
+};
+
+REGISTRAR(OperatorStats, "Softmax", SoftmaxStats::create);
+
+class MemOpStats : public OperatorStats {
+public:
+    MemOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<MemOpStats> create(const Operator& op) {
+        return std::make_unique<MemOpStats>(op);
+    }
+};
+
+REGISTRAR(OperatorStats, "Reshape", MemOpStats::create);
+REGISTRAR(OperatorStats, "Transpose", MemOpStats::create);
+REGISTRAR(OperatorStats, "Concat", MemOpStats::create);
+REGISTRAR(OperatorStats, "Split", MemOpStats::create);
+REGISTRAR(OperatorStats, "Slice", MemOpStats::create);
+REGISTRAR(OperatorStats, "Squeeze", MemOpStats::create);
+REGISTRAR(OperatorStats, "Unsqueeze", MemOpStats::create);
+REGISTRAR(OperatorStats, "Gather", MemOpStats::create);
+REGISTRAR(OperatorStats, "Identity", MemOpStats::create);
+
+class ElemWiseOpStats : public OperatorStats {
+public:
+    ElemWiseOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ElemWiseOpStats> create(const Operator& op) {
+        return std::make_unique<ElemWiseOpStats>(op);
+    }
+
+    size_t getNbArithmOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "Add", ElemWiseOpStats::create);
+REGISTRAR(OperatorStats, "Sub", ElemWiseOpStats::create);
+REGISTRAR(OperatorStats, "Mul", ElemWiseOpStats::create);
+REGISTRAR(OperatorStats, "Div", ElemWiseOpStats::create);
+
+class ElemWiseLogicOpStats : public OperatorStats {
+public:
+    ElemWiseLogicOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ElemWiseLogicOpStats> create(const Operator& op) {
+        return std::make_unique<ElemWiseLogicOpStats>(op);
+    }
+
+    size_t getNbLogicOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "And", ElemWiseLogicOpStats::create);
+
+class ElemWiseNLOpStats : public OperatorStats {
+public:
+    ElemWiseNLOpStats(const Operator& op) : OperatorStats(op) {}
+
+    static std::unique_ptr<ElemWiseNLOpStats> create(const Operator& op) {
+        return std::make_unique<ElemWiseNLOpStats>(op);
+    }
+
+    size_t getNbNLOps() const override {
+        const OperatorTensor& op_ = dynamic_cast<const OperatorTensor&>(mOp);
+        AIDGE_ASSERT(op_.dimsForwarded(), "Dims must be forwarded for static analysis");
+        return op_.getOutput(0)->size();
+    }
+};
+
+REGISTRAR(OperatorStats, "Atan", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Sqrt", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Erf", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Ln", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Sigmoid", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Tanh", ElemWiseNLOpStats::create);
+REGISTRAR(OperatorStats, "Pow", ElemWiseNLOpStats::create);
+}
+
+#endif /* AIDGE_CORE_GRAPH_STATICANALYSIS_H_ */
diff --git a/include/aidge/graphRegex/GraphParser.hpp b/include/aidge/graphRegex/GraphParser.hpp
index 2c25ac0b76368242891e6e5ba92c2c5fc913a23c..b165891ff1c8a55e565e3520813b707303ddfd1f 100644
--- a/include/aidge/graphRegex/GraphParser.hpp
+++ b/include/aidge/graphRegex/GraphParser.hpp
@@ -17,7 +17,7 @@ class GraphParser {
 public:
     /**
      * @brief AST graph creation function
-     * @param gRegexExpressions String representing the logical fuction to be performed
+     * @param gRegexExpressions String representing the logical function to be performed
      */
     GraphParser(const std::string gRegexExpressions);
 
diff --git a/include/aidge/graphRegex/GraphRegex.hpp b/include/aidge/graphRegex/GraphRegex.hpp
index 573447cf934b196e8b0c32d7a58e1977f5aa5f9a..f0f8e68e41a09cb54fb7528cb7f6ce065674af02 100644
--- a/include/aidge/graphRegex/GraphRegex.hpp
+++ b/include/aidge/graphRegex/GraphRegex.hpp
@@ -68,14 +68,14 @@ class GraphRegex{
 
     /**
      *  @brief brief match the queries in the graph 
-     *  @param ref the graph were the querys in search 
+     *  @param ref the graph where the queries are searched
      *  @return the result  
     */
     std::set<std::shared_ptr<MatchSolution>> match(std::shared_ptr<GraphView> ref);
 
     /***
-     *  @brief  match the queries in the graph and applied the recipes fuction  
-     *  @param ref the graph were the querys in search 
+     *  @brief  match the queries in the graph and apply the recipes function
+     *  @param ref the graph where the queries are searched
     */
     void appliedRecipes(std::shared_ptr<GraphView> ref);
 
diff --git a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp b/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
index a6cc3e59247d4be98caa9881182bfba1c44e0178..6397da49478c44ef6050c5bad77f12ba10efaca7 100644
--- a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
@@ -60,7 +60,7 @@ namespace Aidge{
         virtual const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> stmContext) =0;
 
         /**
-        *  @brief test is the egde test a common node
+        *  @brief test whether the edge tests a common node
         *  @return true if is a common
         */
         virtual bool isCommon(void);
@@ -70,7 +70,7 @@ namespace Aidge{
         */
         virtual size_t getCommonIdx(void);
         /**
-         * @brief get the relative postion to the common node deffine in this edge
+         * @brief get the relative position to the common node defined in this edge
          * @return map
         */
         const std::map<size_t,int>& getRelative(void);
@@ -116,7 +116,7 @@ namespace Aidge{
     };
 
     /**
-     * @brief class specialization for not commun node (node that must be match one Unique) transition
+     * @brief class specialization for non-common node (node that must be matched uniquely) transition
     */
     class FsmEdgeUnique:public FsmEdge
     {
@@ -127,7 +127,7 @@ namespace Aidge{
     };
 
     /**
-     * @brief class specialization for  commun node transition
+     * @brief class specialization for  common node transition
      * @see FsmEdge
     */
     class FsmEdgeCommon:public FsmEdge
@@ -135,7 +135,7 @@ namespace Aidge{
 
         private:
         /**
-         * @brief the map that defind the ralation between the commonKey find by the lexer and a unique id use to refer to the common node
+         * @brief the map that defines the relation between the commonKey found by the lexer and a unique id used to refer to the common node
         */
         static std::map<std::string,int> mCommonIdxMap;
         /**
@@ -145,7 +145,7 @@ namespace Aidge{
         public:
 
         /**
-         * @brief constructor  commun node ,
+         * @brief constructor for a common node,
          * @details during construction,
          * the node key found by the lexer is converted to a unique id and the relative positions are updated.
         */
@@ -159,7 +159,7 @@ namespace Aidge{
 
 
     /**
-     * @brief class spesialisation for ref transition
+     * @brief class specialization for ref transition
      * @see FsmEdge
     */
     class FsmEdgeRef:public FsmEdge
diff --git a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp b/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
index d718009e87e5360981ff93ff808124581917c089..e7402b3f0973e4b9e7053b4d59c9ff63ca6dd496 100644
--- a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
@@ -49,7 +49,7 @@ public:
 
     /**
      * @brief get the set of the valid states
-     * @return set of valide state
+     * @return set of valid state
     */
     const std::set<std::shared_ptr<FsmNode>> getValidNodes(void);
 
@@ -60,7 +60,7 @@ public:
     const std::set<std::shared_ptr<FsmNode>> getNodes(void);
 
     /**
-     * @brief set a groupe idx for all the nodes in the graph
+     * @brief set a group idx for all the nodes in the graph
     */
     void setGroupe(std::size_t groupeIdx);
 
diff --git a/include/aidge/graphRegex/matchFsm/FsmNode.hpp b/include/aidge/graphRegex/matchFsm/FsmNode.hpp
index 7987c5ce33522ca7d43de1918d53e68738af6d18..f4636e0e025d26fa2afae88b6ffca28a511e9509 100644
--- a/include/aidge/graphRegex/matchFsm/FsmNode.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmNode.hpp
@@ -31,10 +31,10 @@ namespace Aidge{
     /**
      * @brief is a node in the FSM graph, it's a state in the FSM
      * @details a state can be and/or :
-     * - a valide state, the match is valide if it stop on this edge
+     * - a valid state, the match is valid if it stops on this edge
      * - a start state , the match start on this state
      * The state is also define by this Origin (is the unique id of it's expretion )
-     * and it's groupe (for inner expression TODO)
+     * and it's group (for inner expression TODO)
     */
     class FsmNode : public std::enable_shared_from_this<FsmNode>
     {
@@ -84,7 +84,7 @@ namespace Aidge{
 
         bool isValid(void);
         bool isStart(void);
-        void unValid(void);
+        void invalid(void);
         void valid(void);
         void unStart(void);
         void start(void);
diff --git a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp b/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
index 36d09db47d23395d649a688252f2af803cb1bc9d..0b44172be0c3b671043fda884efadb84ba46e215 100644
--- a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
@@ -40,7 +40,7 @@ private:
     */
     std::map<NodePtr,std::size_t> mCommonNodes;
     /**
-     * @brief the map of the node that as been valid in this context , and the test that valide the node
+     * @brief the map of the nodes that have been validated in this context, and the test that validated the node
     */
     std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>> mValidNodes;
     /**
@@ -52,7 +52,7 @@ public:
      * @brief constructor
      * @param actState the actual state in the FSM
      * @param actOpNode the actual node in the graph
-     * @param idxRejeced the idx in the global regected node vector init max() as sentinel value of undefind
+     * @param idxRejeced the idx in the global rejected node vector, init max() as sentinel value of undefined
     */
     FsmRunTimeContext(std::shared_ptr<FsmNode> actState ,NodePtr actOpNode ,std::size_t idxRejeced =std::numeric_limits<std::size_t>::max() );
     FsmRunTimeContext(std::shared_ptr<FsmRunTimeContext> fsmRunTime);
@@ -85,7 +85,7 @@ public:
 
     /**
      * @ingroup FsmRunTimeContextTest
-     * @brief test if the actual state is valide
+     * @brief test if the actual state is valid
      * @return bool
      */
     bool isOnValidState(void);
diff --git a/include/aidge/hook/ExecTime.hpp b/include/aidge/hook/ExecTime.hpp
deleted file mode 100644
index 0964d9575b7ad345d5e07c9f19c7e56a3b69c813..0000000000000000000000000000000000000000
--- a/include/aidge/hook/ExecTime.hpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * \file execTime.hpp
- * \brief execTime structure
- * \version file 1.0.0
- * \date Creation 27 June 2023
- * \date 27 June 2023
- * \par ChangeLog
- * \par
- *  v1.0.0, 27 June 2023<br>
- *  - Initial version.
- * \author mn271187, ik243221
- * \copyright
- *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
- *  rights reserved.
- */
-
-#ifndef execTime_H_
-#define execTime_H_
-
-#include "aidge/operator/Operator.hpp"
-#include "aidge/hook/Hook.hpp"
-#include <memory>
-#include <chrono>
-#include <vector>
-
-namespace Aidge {
-
-class ExecTime : public Hook {
-private:
-    std::vector<std::chrono::high_resolution_clock::time_point> registeredTimes = std::vector<std::chrono::high_resolution_clock::time_point>();
-public:
-    ExecTime(const std::shared_ptr<Operator> op) : Hook(op) {}
-    ~ExecTime() = default;
-
-    void call() override final {
-        registeredTimes.push_back(std::chrono::high_resolution_clock::now());
-    }
-
-    static std::shared_ptr<ExecTime> create(const std::shared_ptr<Operator> op)
-    {
-        return std::make_shared<ExecTime>(op);
-    }
-
-    std::vector<std::chrono::high_resolution_clock::time_point> getTimes() {
-        return  registeredTimes;
-    }
-
-    std::chrono::high_resolution_clock::time_point getTime(size_t idx) {
-        return registeredTimes[idx];
-    }
-
-};
-
-namespace {
-    static Registrar<Hook> registrarHook_ExecTime({"execution_time"}, Aidge::ExecTime::create);
-}
-}
-
-#endif /* execTime_H_ */
\ No newline at end of file
diff --git a/include/aidge/hook/Hook.hpp b/include/aidge/hook/Hook.hpp
deleted file mode 100644
index 5edf231d51f913f58351b4817e145b5f48953ddd..0000000000000000000000000000000000000000
--- a/include/aidge/hook/Hook.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * \file Hook.hpp
- * \brief Hook structure
- * \version file 1.0.0
- * \date Creation 27 June 2023
- * \date 27 June 2023
- * \par ChangeLog
- * \par
- *  v1.0.0, 27 June 2023<br>
- *  - Initial version.
- * \author mn271187, ik243221
- * \copyright
- *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
- *  rights reserved.
- */
-
-#ifndef Hook_H_
-#define Hook_H_
-
-#include "aidge/utils/Attributes.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include <memory>
-
-namespace Aidge {
-
-class Operator;
-class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>> {
-//class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>>{
-protected:
-    const std::shared_ptr<Operator> mOperator;
-
-public:
-    Hook(std::shared_ptr<Operator> op) : mOperator(op) {}
-    virtual ~Hook() = default;
-
-    virtual void call() = 0;
-
-};
-}
-
-#endif /* Hook_H_ */
diff --git a/include/aidge/hook/OutputRange.hpp b/include/aidge/hook/OutputRange.hpp
deleted file mode 100644
index 355f4aaa15a6bcd77d99ec2dad344a45f8f9edc0..0000000000000000000000000000000000000000
--- a/include/aidge/hook/OutputRange.hpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * \file execTime.hpp
- * \brief execTime structure
- * \version file 1.0.0
- * \date Creation 27 June 2023
- * \date 27 June 2023
- * \par ChangeLog
- * \par
- *  v1.0.0, 27 June 2023<br>
- *  - Initial version.
- * \author ik243221
- * \copyright
- *  Copyright (c) 2023 CEA, LIST, Embedded Artificial Intelligence Laboratory. All
- *  rights reserved.
- */
-
-#ifndef AIDGE_CORE_HOOK_OUTPUTRANGE_H_
-#define AIDGE_CORE_HOOK_OUTPUTRANGE_H_
-
-#include "aidge/operator/Operator.hpp"
-#include "aidge/hook/Hook.hpp"
-#include <memory>
-#include <chrono>
-#include <vector>
-#include <cmath>
-namespace Aidge {
-
-class OutputRange : public Hook {
-private:
-    std::vector<float> registeredOutputs = std::vector<float>();
-public:
-    OutputRange(const std::shared_ptr<Operator> op) : Hook(op) {}
-    ~OutputRange() = default;
-
-    void call() override final {
-        //std::cout << "call() outputRange hook " << std::endl;
-        //this assumes there is only 1 output possible
-        std::shared_ptr<Tensor> tensor = mOperator->getOutput(0);
-        //tensor->print();
-        //std::cout << "call() outputRange hook : tensor printed" << std::endl;
-        float max_value = 0.;
-        float * casted_tensor = static_cast<float *>(tensor->getImpl()->rawPtr());
-        //find the absolute max value in the tensor, save it to registered outputs
-        for(std::size_t i = 0; i < tensor->size(); ++i) {
-            //std::cout << "call() outputRange hook : casted_tensor[i] = " << casted_tensor[i] << std::endl;
-            if(std::abs(casted_tensor[i]) > max_value){
-                max_value = std::abs(casted_tensor[i]);
-            }
-        }
-        //std::cout << "call() outputRange hook : max_value = " << max_value << std::endl;
-        registeredOutputs.push_back(max_value);
-    }
-
-    static std::shared_ptr<OutputRange> create(const std::shared_ptr<Operator> op)
-    {
-        return std::make_shared<OutputRange>(op);
-    }
-
-    std::vector<float> getOutputs() {
-        return  registeredOutputs;
-    }
-
-    float getOutput(size_t idx) {
-        return registeredOutputs[idx];
-    }
-
-};
-
-namespace {
-    static Registrar<Hook> registrarHook_OutputRange({"output_range"}, Aidge::OutputRange::create);
-}
-}
-
-#endif /* outputRange_H_ */
\ No newline at end of file
diff --git a/include/aidge/nodeTester/ConditionalData.hpp b/include/aidge/nodeTester/ConditionalData.hpp
index 12df32a728571678a3885f9981e526e1d73db785..c6c521bd9c3e1a0333bb2a6c38545bb2bf6f3fe6 100644
--- a/include/aidge/nodeTester/ConditionalData.hpp
+++ b/include/aidge/nodeTester/ConditionalData.hpp
@@ -12,7 +12,7 @@ namespace Aidge{
 
 
 /////////////////////////
-// The data type in AST Intepretation
+// The data type in AST Interpretation
 ////////////////////////
 
 class BaseConditionalValue {
diff --git a/include/aidge/nodeTester/ConditionalInterpreter.hpp b/include/aidge/nodeTester/ConditionalInterpreter.hpp
index af6a3b920bb9ca389724860d55250d7ef4540677..713a166ec2cea7781ce98c850ecbf587eca58678 100644
--- a/include/aidge/nodeTester/ConditionalInterpreter.hpp
+++ b/include/aidge/nodeTester/ConditionalInterpreter.hpp
@@ -37,7 +37,7 @@ class ConditionalRegisterFunction {
      */
     template <typename T>
     T safeCastInput( std::shared_ptr<ConditionalData> data) {
-        //cnvertion and type cheking
+        //conversion and type checking
         if (data->isTypeEqualTo<T>()){
             return data->getValue<T>();
         }else{
@@ -123,7 +123,7 @@ class ConditionalRegisterFunction {
      */
     template <class F, std::size_t... ParamsIdx>
     auto funcPointer(F f, std::index_sequence<ParamsIdx...>) {
-        //wrapp the lambda in a new one that as ConditionalData as inputs and output
+        //wrap the lambda in a new one that has ConditionalData as inputs and output
     	return [this,f](std::vector< std::shared_ptr<ConditionalData>>  &args) {
             if (args.size() < sizeof...(ParamsIdx)){
                 std::ostringstream errorMessage;
@@ -199,10 +199,10 @@ class ConditionalRegisterFunction {
      /**
      * @brief Runs the function associated with the given key, using the provided vector of input data.
      * @param key The key of the function to run.
-     * @param datas The vector of input data.
+     * @param data The vector of input data.
      * @return A pointer to the output ConditionalData object.
      */
-     std::shared_ptr<ConditionalData> run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & datas);
+     std::shared_ptr<ConditionalData> run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & data);
 
     bool isLambdaRegister(const std::string &key) {
         if(mWlambda.find(key) != mWlambda.end()){
@@ -237,7 +237,7 @@ class ConditionalInterpreter
      */
     std::shared_ptr<AstNode<ConditionalTokenTypes>> mTree;
     /**
-     * @brief the registery for the lambda fuction
+     * @brief the registry for the lambda function
      * @see ConditionalRegisterFunction
     */
     ConditionalRegisterFunction mLambdaRegister;
@@ -275,8 +275,8 @@ class ConditionalInterpreter
 
     /**
      * @brief Test a node depending of the ConditionalExpressions
-     * @details the AST is visit using \ref visit() whith the $ init whit the nodeOp
-     * @return bool the match node has the initialized expresion
+     * @details the AST is visited using \ref visit() with the $ initialized with the nodeOp
+     * @return bool the match node has the initialized expression
      * @see visit() This function uses the visit() function to perform the evaluation.
      */
     bool test( const NodePtr nodeOp);
@@ -295,7 +295,7 @@ class ConditionalInterpreter
     private:
     /**
      * @brief Recursive AST traversal function, using the for interpreting AST nodes function,
-     * using \ref ASTnodeInterpreterF fuctions
+     * using \ref ASTnodeInterpreterF functions
      * @param NodeOp The node currently being tested
      * @param nodes The AST given by the parsing process
      */
diff --git a/include/aidge/nodeTester/ConditionalLexer.hpp b/include/aidge/nodeTester/ConditionalLexer.hpp
index fcfb9ebe783ac719076ce675e6fc3d78caf5be07..0cf15d968bb6dae7532a1bcbb6c77b98ba0e42c6 100644
--- a/include/aidge/nodeTester/ConditionalLexer.hpp
+++ b/include/aidge/nodeTester/ConditionalLexer.hpp
@@ -65,7 +65,7 @@ private:
 
 /**
  * @brief Constructs an error message to display the character not understood by the lexer
- * @return error mesage
+ * @return error message
  */
 std::runtime_error badTokenError(const std::string& currentChars,std::size_t position);
 
diff --git a/include/aidge/nodeTester/ConditionalParser.hpp b/include/aidge/nodeTester/ConditionalParser.hpp
index 1f3671ea5b68008a67be5d6a63d09051d49939d5..06b0e112cfe9bba6a4f0bf32eb1b793326a357f8 100644
--- a/include/aidge/nodeTester/ConditionalParser.hpp
+++ b/include/aidge/nodeTester/ConditionalParser.hpp
@@ -34,7 +34,7 @@ class ConditionalParser {
     public:
     /**
      * @brief AST graph creation function
-     * @param ConditionalExpressions String representing the logical fuction to be performed
+     * @param ConditionalExpressions String representing the logical function to be performed
      */
     ConditionalParser(const std::string ConditionalExpressions);
 
@@ -87,7 +87,7 @@ class ConditionalParser {
     std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstLambda(void);
     /**
     * @ingroup ParsingFunctions
-    * @brief Function of grammar rules for a expresion : cmpr ((AND | OR) cmpr)*
+    * @brief Function of grammar rules for an expression : cmpr ((AND | OR) cmpr)*
     * @return AST node
     */
     std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstExpr(std::size_t precLimit = 0);
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index daf50771703d6608dbbe90364aac8667aefbdd1d..827fc0c2732695364aa2393692d7040b8b1a0e9f 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -18,8 +18,9 @@
 
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/graph/Node.hpp"
-#include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
 
@@ -28,7 +29,7 @@ class Add_Op : public OperatorTensor,
 public:
     static const std::string Type;
 
-    Add_Op(const IOIndex_t nbIn);
+    Add_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -65,7 +66,7 @@ public:
     }
 };
 
-std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "");
+std::shared_ptr<Node> Add(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
diff --git a/include/aidge/operator/Atan.hpp b/include/aidge/operator/Atan.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f9c7d09e7a49cd8687166006eea75510aeda57ee
--- /dev/null
+++ b/include/aidge/operator/Atan.hpp
@@ -0,0 +1,52 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ATAN_H_
+#define AIDGE_CORE_OPERATOR_ATAN_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Atan_Op : public OperatorTensor,
+    public Registrable<Atan_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Atan_Op&)>> {
+public:
+    static const std::string Type;
+
+    Atan_Op();
+
+    Atan_Op(const Atan_Op& op);
+
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> Atan(const std::string& name = "");
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ATAN_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index cdac7935f6ded752201c04b2dda6cfb9e06438ec..8f33380b29a1509c75721994f16139b9f1a9e20a 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -24,7 +24,7 @@
 
 namespace Aidge {
 
-enum class BatchNormAttr { Epsilon, Momentum };
+enum class BatchNormAttr { Epsilon, Momentum, TrainingMode };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
@@ -33,7 +33,7 @@ public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
+    using Attributes_ = StaticAttributes<BatchNormAttr, float, float, bool>;
     template <BatchNormAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
@@ -42,7 +42,7 @@ public:
 
     BatchNorm_Op() = delete;
 
-    constexpr BatchNorm_Op(float epsilon, float momentum)
+    constexpr BatchNorm_Op(float epsilon, float momentum, bool trainingMode)
         : OperatorTensor(Type,
                             {InputCategory::Data,
                                 InputCategory::Param,
@@ -52,7 +52,9 @@ public:
                             1),
           mAttributes(std::make_shared<Attributes_>(
             attr<BatchNormAttr::Epsilon>(epsilon),
-            attr<BatchNormAttr::Momentum>(momentum))) {}
+            attr<BatchNormAttr::Momentum>(momentum),
+            attr<BatchNormAttr::TrainingMode>(trainingMode)
+            )) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -84,6 +86,7 @@ public:
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
     inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); }
+    inline bool& trainingMode() const { return mAttributes->template getAttr<BatchNormAttr::TrainingMode>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
@@ -101,16 +104,17 @@ template <DimSize_t DIM>
 std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
                                        const float epsilon = 1.0e-5F,
                                        const float momentum = 0.1F,
+                                       const bool trainingMode = false,
                                        const std::string& name = "");
 }  // namespace Aidge
 
-extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
-extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
-extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
+extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const bool, const std::string&);
+extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const bool, const std::string&);
+extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const bool, const std::string&);
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum" };
+const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum", "training_mode" };
 }
 
 #endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..aa37b50320e9deaa94e83ff7cc3578745b560228
--- /dev/null
+++ b/include/aidge/operator/Clip.hpp
@@ -0,0 +1,122 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_CLIP_H_
+#define AIDGE_CORE_OPERATOR_CLIP_H_
+
+#include <memory>
+#include <vector>
+#include <limits>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+enum class ClipAttr { Min, Max };
+
+
+class Clip_Op : public OperatorTensor,
+    public Registrable<Clip_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Clip_Op&)>> {
+
+public:
+    static const std::string Type;
+private:
+    using Attributes_ = StaticAttributes<ClipAttr, float, float>;
+    template <ClipAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Clip_Op() = delete;
+    Clip_Op(float min,float max) : 
+    OperatorTensor(Type, {InputCategory::Data,InputCategory::OptionalData,InputCategory::OptionalData}, 1),
+    mAttributes(std::make_shared<Attributes_>(attr<ClipAttr::Min>(min), attr<ClipAttr::Max>(max)))
+    {}
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Clip_Op(const Clip_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(Clip_Op, *this, op.backend());
+        }else{
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Clip_Op
+     */
+    std::shared_ptr<Operator> clone() const override 
+    {
+        return std::make_shared<Clip_Op>(*this);
+    }
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+    
+    /**
+     * @brief Setter to specify the backend to use
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    /**
+    * @brief Getter and Setter for min attribute value
+    * @return float& 
+    */
+    inline float& min() const noexcept { return mAttributes->getAttr<ClipAttr::Min>(); }
+    /**
+    * @brief Getter and Setter for max attribute value
+    * @return float& 
+    */
+    inline float& max() const noexcept { return mAttributes->getAttr<ClipAttr::Max>(); }
+
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input","min_empty_tensor","max_empty_tensor"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+    /**
+     * @brief The Clip operator is a tensor operator that performs a clipping operation on tensor elements.
+            This class allows limiting tensor values to a specified range, defined by the min
+            and max parameters (tensors of empty shapes). Values outside this range are replaced by the corresponding limit values.
+            When 'min' is greater than 'max', the clip operator sets all the 'input' values to the value of 'max'.
+    * @param[in] name Name of the node
+    * @param[in] min Min clip value as attribute
+     * @param[in] max Max clip value as attribute
+    * @return std::shared_ptr<Node> 
+    */
+    std::shared_ptr<Aidge::Node> Clip(
+        const std::string &name = "",
+        float min = std::numeric_limits<float>::lowest(),
+        float max = std::numeric_limits<float>::max()
+        );
+
+}
+namespace {
+template <>
+const char* const EnumStrings<Aidge::ClipAttr>::data[]
+    = {"min", "max"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_CLIP_H_ */
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 2812da066887d63133ede2d69b5804f0b8a8101e..327f4f7c3d43b5194f23cfaed8674ee0b47bd6a2 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>> {
+      public Registrable<GenericOperator_Op, std::array<std::string, 2>, std::function<std::shared_ptr<OperatorImpl>(const GenericOperator_Op &)>> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
@@ -64,7 +64,7 @@ public:
     inline T& getAttr(const std::string& name)
     { return mAttributes -> template getAttr<T>(name); }
     template <class T>
-    inline const T& getAttr(const std::string& name) const
+    inline T getAttr(const std::string& name) const
     { return mAttributes -> template getAttr<T>(name); }
 
     ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
diff --git a/include/aidge/operator/Heaviside.hpp b/include/aidge/operator/Heaviside.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..65c7f341a1c5aa12eeada2165e459cc1c933e327
--- /dev/null
+++ b/include/aidge/operator/Heaviside.hpp
@@ -0,0 +1,107 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_HEAVISIDE_H_
+#define AIDGE_CORE_OPERATOR_HEAVISIDE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class HeavisideAttr { 
+    /**
+     * @brief The value used in the output tensor when the input is 0.
+     */
+    Value 
+};
+
+class Heaviside_Op
+    : public OperatorTensor,
+      public Registrable<
+          Heaviside_Op,
+          std::string,
+          std::function<std::shared_ptr<OperatorImpl>(const Heaviside_Op &)>> {
+  private:
+    using Attributes_ = StaticAttributes<HeavisideAttr, float>;
+    template <HeavisideAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+  public:
+    static const std::string Type;
+
+    /*
+     * Compute the Heaviside step function for each element of the first input.
+     * Heaviside step function is defined as :
+     *
+     * \f[
+     * heaviside(input, values) = \begin{cases}
+     *     0      & \text{if input }  < 0 \\
+     *     values & \text{if input }  = 0 \\
+     *     1      & \text{if input }  > 0
+     * \end{cases}
+     * \f]
+     * */
+    Heaviside_Op(float value);
+
+    Heaviside_Op(const Heaviside_Op &op);
+
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input", "data_values"};
+    }
+
+    static const std::vector<std::string> getOutputsName() {
+        return {"output"};
+    }
+
+    inline std::shared_ptr<Attributes> attributes() const override {
+        return mAttributes;
+    }
+    inline float &value() const {
+        return mAttributes->template getAttr<HeavisideAttr::Value>();
+    }
+};
+
+/**
+ * @brief Create a Heaviside node.
+ *
+ * Initializes a Heaviside node that computes the Heaviside step function for each element 
+ * of the input tensor, using the specified value for inputs equal to zero.
+ *
+ * @param value The value used in the output tensor when the input is 0.
+ * @param name  Optional. The name of the node.
+ * 
+ * @return A shared pointer to a Node representing the Heaviside operation.
+ */
+std::shared_ptr<Node> Heaviside(float value, const std::string &name = "");
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::HeavisideAttr>::data[] = {"value"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_HEAVISIDE_H_ */
diff --git a/include/aidge/operator/LRN.hpp b/include/aidge/operator/LRN.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..669564d4a8fb0da2fac4f67e164fc08653131472
--- /dev/null
+++ b/include/aidge/operator/LRN.hpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_LRN_H_
+#define AIDGE_CORE_OPERATOR_LRN_H_
+
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class LRNAttr { Alpha, Beta, Bias, Size };
+
+class LRN_Op : public OperatorTensor,
+                public Registrable<LRN_Op,
+                                   std::string,
+                                   std::function<std::shared_ptr<OperatorImpl>(const LRN_Op&)>> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<LRNAttr, float, float, float, std::int32_t>;
+    template <LRNAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    LRN_Op() = delete;
+
+    LRN_Op(std::int32_t size);
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    LRN_Op(const LRN_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::LRN_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& alpha() const noexcept { return mAttributes -> getAttr<LRNAttr::Alpha>(); }
+    inline float& beta() const noexcept { return mAttributes -> getAttr<LRNAttr::Beta>(); }
+    inline float& bias() const noexcept { return mAttributes -> getAttr<LRNAttr::Bias>(); }
+    inline std::int32_t& size() const noexcept { return mAttributes -> getAttr<LRNAttr::Size>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> LRN(std::int32_t size, const std::string& name = "");
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::LRNAttr>::data[] = {"alpha", "beta", "bias", "size"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_LRN_H_ */
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index ccff976cbb7cf8efc59223dfd658ca2a4d03a80b..47eb6cf97e238f820b8ef2d1f3296040e73aa43f 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -28,7 +28,7 @@
 
 namespace Aidge {
 class MetaOperator_Op : public OperatorTensor,
-                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)>> {
+                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::shared_ptr<OperatorImpl>(const MetaOperator_Op &)>> {
 public:
     // outputs shared with micro-graph output Tensors
     // Micro-graph handling:
@@ -36,8 +36,11 @@ public:
     std::shared_ptr<SequentialScheduler> mScheduler;
     std::weak_ptr<Node> mUpperNode;
 
-   public:
-    MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph);
+private:
+    const std::shared_ptr<DynamicAttributes> mAttributes = std::make_shared<DynamicAttributes>();
+
+public:
+    MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph, const std::vector<InputCategory>& forcedInputsCategory = {});
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -92,7 +95,7 @@ public:
         mGraph->setDataType(datatype);
     }
 
-    std::shared_ptr<Attributes> attributes() const override;
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override;
@@ -113,6 +116,7 @@ public:
 
 std::shared_ptr<Node> MetaOperator(const char *type,
                                   const std::shared_ptr<GraphView>& graph,
+                                  const std::vector<InputCategory>& forcedInputsCategory = {},
                                   const std::string& name = "");
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index bc3348377525cdd2e5b2c030c8fc6b7cb8177e7b..481a7795e24acd006acfc66a0fccde1b8da747e7 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -126,7 +126,7 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
         MaxPooling(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims, ceil_mode)
     });
 
-    return MetaOperator("PaddedMaxPooling", graph, name);
+    return MetaOperator(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
@@ -140,7 +140,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<Dim
         MaxPooling(kernel_dims, "", stride_dims, ceil_mode)
     });
 
-    return std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph);
+    return std::make_shared<MetaOperator_Op>(("PaddedMaxPooling" + std::to_string(DIM) + "D").c_str(), graph);
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 87aa4080e57d14d0d8a738afed2e976521b42048..e9988b4421b785a91ec170796be49c0c8df52142 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -28,7 +28,7 @@
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/hook/Hook.hpp"
+
 
 #ifdef PYBIND
 namespace py = pybind11;
@@ -50,7 +50,7 @@ enum class InputCategory {
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
     std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
-    std::map<std::string, std::shared_ptr<Hook>> mHooks;
+    std::shared_ptr<DynamicAttributes> mInheritedAttrs;
 
 private:
     std::string mType;
@@ -81,7 +81,10 @@ public:
         mImpl = nullptr;
         // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
         // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
-        // Hooks are not copied.
+    }
+    std::shared_ptr<Operator> operator()(std::shared_ptr<DynamicAttributes> attrs) {
+        mInheritedAttrs = attrs;
+        return shared_from_this();
     }
 
     virtual ~Operator() noexcept;
@@ -90,6 +93,7 @@ public:
     virtual std::shared_ptr<Operator> clone() const = 0;
 
     virtual std::shared_ptr<Attributes> attributes() const { return nullptr; };
+    virtual std::shared_ptr<DynamicAttributes> inheritedAttributes() const { return mInheritedAttrs; };
     /**
      * @brief Set the specified input with a shallow copy.
      * @param inputIdx Index of the input to set.
@@ -114,14 +118,6 @@ public:
     virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const = 0;
     virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
 
-    std::shared_ptr<Hook> getHook(const std::string& hookName) {
-        return mHooks[hookName];
-    }
-    void addHook(const std::string& hookName) {
-        mHooks.insert(std::pair<std::string, std::shared_ptr<Hook>>(hookName,Registrar<Hook>::create({hookName})(shared_from_this())));
-    }
-
-    void runHooks() const;
 
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
@@ -153,7 +149,7 @@ public:
 
     /**
      * @brief Minimum amount of data from a specific input for one computation pass.
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return Elts_t
      */
     virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
@@ -167,7 +163,7 @@ public:
     /**
      * @brief Total amount of consumed data from a specific input.
      *
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return Elts_t
      */
     virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
@@ -175,7 +171,7 @@ public:
     /**
      * @brief Total amount of produced data ready to be used on a specific output.
      *
-     * @param outputIdx Index of the output analysed.
+     * @param outputIdx Index of the output analyzed.
      * @return Elts_t
      */
     virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
@@ -211,8 +207,8 @@ public:
     inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
 
     /**
-     * @brief Set the back edge input indexes for recurring operators.
-     * Any recuring operators should specify it's back edges, otherwise
+     * @brief Set the back edge input indexes for recurring operators.
+     * Any recurring operators should specify its back edges, otherwise
      * the interpretation of the data flow graph may not be possible.
      */
     inline void setBackEdges(const std::set<IOIndex_t>& backEdges) { mBackEdges = backEdges; }
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index c8cdd93810e18bd3cdd0a2d080e54aae2d787c66..19e2f13e4ff39fee181c6ad0cf2fbab510f22c3e 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -92,7 +92,7 @@ public:
 	 * @brief Will compute the dimensions of operator's output tensor given the input sizes
  	 *        If the output dimensions cannot be computed because it depends on some undefined inputs then forwardDims will return false and enter in TOKEN mode for subsequent tensors.
  	 *        - TOKEN mode means that forwarddims will only ensure that all inputs and outputs of the graph the node is within are connected.
- 	 * @param[in] allowDataDependency if set to true, this means that this operator output dimensions depends on the dimensions of optionnal parameter tensors.
+ 	 * @param[in] allowDataDependency if set to true, this means that this operator output dimensions depends on the dimensions of optional parameter tensors.
  	 * @return true if dims have been properly forwarded. false otherwise. If set to false, then forwardDims will enter in token mode.
  	 *      
      */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 2c670bf23d4703a5a9e8502c8b356fdde32e2561..181a4e88a0fe30e2a86c44adda2195d7e6f5293d 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -21,11 +21,20 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
-enum class PadBorderType { Constant, Edge, Reflect, Wrap };
+enum class PadBorderType {
+    /** @brief all out of bound values will be set to a given value.*/
+    Constant,
+    Edge,
+    Reflect,
+    Wrap,
+    /** @brief all out of bound values will be set to 0.*/
+    Zero,
+};
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
@@ -47,7 +56,7 @@ public:
     Pad_Op() = delete;
 
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                     const PadBorderType &borderType = PadBorderType::Constant,
+                     PadBorderType borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
@@ -92,7 +101,7 @@ public:
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                         const std::string& name = "",
-                        const PadBorderType &borderType = PadBorderType::Constant,
+                        PadBorderType borderType = PadBorderType::Constant,
                         double borderValue = 0.0);
 
 // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
@@ -100,7 +109,7 @@ template <DimSize_t DIM>
 inline std::shared_ptr<Node> Pad(
     DimSize_t const (&beginEndTuples)[2*DIM],
     const std::string& name = "",
-    const PadBorderType &borderType = PadBorderType::Constant,
+    PadBorderType borderType = PadBorderType::Constant,
     double borderValue = 0.0)
 {
     return Pad<DIM>(to_array(beginEndTuples), name, borderType, borderValue);
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 9b264c1d3d7955f71538dd90f105cfd7ee469d0a..a9a84a3ee80eea5c0032fa08bce4ab96c44dba04 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -25,16 +25,34 @@
 
 namespace Aidge {
 
-class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> {
+/**
+ * @brief Description of an element-wise Rectified Linear Unit (ReLU) operation
+ * on an input Tensor.
+ *
+ * For each element x in the input, the function is defined as:
+ * `f(x) = max(0, x)`
+ *
+ * The input and output Tensors have the same dimensions.
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class ReLU_Op :
+    public OperatorTensor,
+    public Registrable<ReLU_Op,  // <Op, backend, implementation creation function>
+        std::string,
+        std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>>
+{
 public:
     static const std::string Type;
 
     ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @brief Copy-constructor.
+     * @param op ReLU_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
      */
     ReLU_Op(const ReLU_Op& op);
 
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index a48b95aff2a18750d83f12a62c408ad41b20afee..c3c7838efc16a0d091f5f0422442225cef8a0ab5 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -9,60 +9,226 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CORE_OPERATOR_Resize_H_
-#define AIDGE_CORE_OPERATOR_Resize_H_
+#ifndef AIDGE_CORE_OPERATOR_RESIZE_H_
+#define AIDGE_CORE_OPERATOR_RESIZE_H_
 
 #include <memory>
 #include <string>
 #include <vector>
 
 #include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Interpolation.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Pad.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-class Resize_Op : public OperatorTensor,
-                  public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{
+/* @brief attributes for the aidge operator */
+enum class ResizeAttr {
+    //   antialias,
+    // axes,
+    CoordinateTransformationMode,
+    CubicCoeffA,
+    // excludeOutside,
+    //   extrapolation_value,
+    //   keep_aspect_ratio_policy,
+    InterpolationMode,
+    PaddingMode
+};
 
-public:
+/**
+ * @brief Resize operator, will up/downscale a given tensor given the input.
+ * @verbatim
+ * Output size can be computed in 2 ways :
+ * 1. Image can be rescaled proportionally to the input size :
+ *    output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)
+ * 2. Output dimensions are directly given via the size input(#4)
+ *
+ * Hence, either input Scale or Input Sizes can be defined, if both are
+ * connected, the operator will throw an error.
+ *
+ * Resize takes (up to) 4 different inputs :
+ * #1 Input to resize :
+ *   N-D tensor.
+ *
+ * #2 ROI (optional) :
+ *   1-D tensor of coordinates given as [start1, …, startN, end1, …, endN]
+ *   where N is the rank of X or the length of axes, if provided. The RoIs’
+ *   coordinates are normalized in the coordinate system of the input image.
+ *   If not set, the default ROI is the entire image.
+ * #3 scales (optional) - tensor(float):
+ *   The scale array along each dimension.
+ *    The number of elements of ‘scales’ should be the same as the rank of
+ * input ‘X’ or the length of ‘axes’, if provided. Accepted values: (0,inf)
+ *    - (0,1)   : downsampling
+ *    - 1       : identity
+ *    - (1,inf) : upsampling
+ * #4. Sizes - tensor(int64):
+ *   Target size of the output tensor.
+ *   Its interpretation depends on the ‘keep_aspect_ratio_policy’ value.
+ *   The number of elements of ‘sizes’ should be the same as either :
+ *   - The rank of input ‘X’
+ *   - The length of ‘axes’ attribute, if provided.
+ * @endverbatim
+ * @warning : Only one of ‘scales’ and ‘sizes’ can be specified.
+ * @param coordinate_transformation_mode
+ * @param cubic_coeff_a the "a" coefficient of cubic interpolation. Most often
+ * it is set to -0.75
+ * @param InterpolationMode type of interpolation (currently only support cubic
+ * interpolation)
+ */
+class Resize_Op
+    : public OperatorTensor,
+      public Registrable<
+          Resize_Op,
+          std::string,
+          std::function<std::shared_ptr<OperatorImpl>(const Resize_Op &)>> {
+
+  private:
+    using Attributes_ =
+        StaticAttributes<ResizeAttr,
+                         Interpolation::CoordinateTransformation,
+                         float,
+                         Interpolation::Mode,
+                         PadBorderType>;
+    template <ResizeAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+  public:
     static const std::string Type;
-
-    Resize_Op();
+    /**
+     * @brief creates a resize operator
+     * This node can take 4 different inputs, more details in the class
+     * doxygen.
+     * 1. Input to resize :
+     * 2. ROI NOT SUPPORTED (optional) :
+     * 3. scales (optional) - tensor(float):
+     * 4. sizes - tensor(int64):
+     * @param[in] coordinate_transformation_mode
+     * @param[in] cubic_coeff_a the a coefficient of cubic interpolation. Only
+     * used if interpolation_mode = Interpolation::Mode::Cubic
+     * @param[in] interpolationMode : Type of interpolation used for
+     * up/downsampling
+     * @warning Scales & Sizes inputs cannot be set simultaneously. If both are
+     * set, forward will fail.
+     * @return NodePtr
+     */
+    Resize_Op(
+        Interpolation::CoordinateTransformation coordTransfoMode,
+        Interpolation::Mode interpol_mode = Interpolation::Mode::RoundPreferFloor,
+        float cubic_coef_a = -.75f,
+        PadBorderType paddingMode = PadBorderType::Edge)
+        : OperatorTensor(Type,
+                         {InputCategory::Data,
+                          InputCategory::OptionalData,
+                          InputCategory::OptionalData,
+                          InputCategory::OptionalData},
+                         1),
+          mAttributes(std::make_shared<Attributes_>(
+              attr<ResizeAttr::CubicCoeffA>(cubic_coef_a),
+              attr<ResizeAttr::CoordinateTransformationMode>(coordTransfoMode),
+              attr<ResizeAttr::InterpolationMode>(interpol_mode),
+              attr<ResizeAttr::PaddingMode>(paddingMode))) {}
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
-     * but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output
+     * tensor(s), but not its input tensors: the new operator has no input
+     * associated.
      * @param op Operator to copy.
      */
-    Resize_Op(const Resize_Op& op);
+    Resize_Op(const Resize_Op &op)
+        : OperatorTensor(op), mAttributes(op.mAttributes) {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Resize_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<Operator> clone() const override final {
+        return std::make_shared<Resize_Op>(*this);
+    }
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
+    void setBackend(const std::string &name,
+                    DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override {
+        return Registrar<Resize_Op>::getKeys();
+    }
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    inline Interpolation::CoordinateTransformation
+    coordinateTransformationMode() const {
+        return mAttributes
+            ->template getAttr<ResizeAttr::CoordinateTransformationMode>();
+    }
+    inline float cubicCoefA() const {
+        return mAttributes->template getAttr<ResizeAttr::CubicCoeffA>();
+    }
+    inline Interpolation::Mode interpolationMode() const {
+        return mAttributes->template getAttr<ResizeAttr::InterpolationMode>();
+    }
+    inline PadBorderType paddingMode() const {
+        return mAttributes->template getAttr<ResizeAttr::PaddingMode>();
+    }
 
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         //  roi, scales, sizes, even if considered as const parameters/input
         return {"data_input", "roi ", "scales", "sizes"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-std::shared_ptr<Node> Resize(const std::string &name = "");
-
-}  // namespace Aidge
-
-
-#endif /* AIDGE_CORE_OPERATOR_Resize_H_ */
\ No newline at end of file
+/**
+ * @brief creates a node that contains a resize operator
+ * This node can take 4 different inputs, more details in the class doxygen.
+ * #0 Input to resize
+ * #1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
+ * #2 scales (optional) - tensor(float)
+ * #3 sizes - tensor(int64)
+ * @param[in] coordinate_transformation_mode
+ * @param[in] interpolationMode type of interpolation used in case of
+ * upsampling
+ * @param[in] cubic_coeff_a the "a" coefficient of cubic interpolation. Only
+ * used if interpolation_mode = Interpolation::Mode::Cubic
+ * @warning Scales & Sizes inputs cannot be set simultaneously. If both are set,
+ * forward will fail.
+ * @warning Padding mode will tell how values out of bound are treated.
+ * @return NodePtr
+ */
+std::shared_ptr<Node>
+Resize(std::vector<float> scale = std::vector<float>(),
+        std::vector<std::size_t> size = std::vector<std::size_t>(),
+       Interpolation::CoordinateTransformation coordTransfoMode =
+           Interpolation::CoordinateTransformation::HalfPixel,
+       Interpolation::Mode interpolMode =
+           Interpolation::Mode::RoundPreferFloor,
+       float cubicCoefA = -.75f,
+       const std::string &name = "");
+
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ResizeAttr>::data[] = {
+    "coordinateTransformationMode",
+    "cubicCoeffA",
+    "InterpolationMode",
+    "PaddingMode"
+};
+}
+#endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
diff --git a/include/aidge/operator/Round.hpp b/include/aidge/operator/Round.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..00352421d193eff543f1351b57f8db54ac742393
--- /dev/null
+++ b/include/aidge/operator/Round.hpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ROUND_H_
+#define AIDGE_CORE_OPERATOR_ROUND_H_
+
+#include <memory>
+#include <vector>
+#include <string>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Round_Op : public OperatorTensor,
+                public Registrable<Round_Op,
+                                std::string,
+                                std::function<std::shared_ptr<OperatorImpl>(const Round_Op&)>> {
+
+
+public:
+    static const std::string Type;
+
+    Round_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Round_Op(const Round_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Round_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> Round(const std::string& name = "");
+}
+
+
+#endif /* AIDGE_CORE_OPERATOR_ROUND_H_ */
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 4ef39f63a2f9af34cd3fe28b01cf2fc195bdfc6e..9465667babb0978e33de3cf9f155d9b2e9d495b4 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -23,6 +23,9 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+//Caution: This operator is now deprecated and should no longer be used. 
+//It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
+
 namespace Aidge {
 enum class ScalingAttr {
     ScalingFactor, QuantizedNbBits, IsOutputUnsigned
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index 24bc3321673f4dcffd3e3663f7e0a0e584389492..ceef45bf0a12315354c8b7bf378c2da834b867b1 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class Sigmoid_Op : public OperatorTensor,
-    public Registrable<Sigmoid_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)>> {
+    public Registrable<Sigmoid_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sigmoid_Op&)>> {
 public:
     static const std::string Type;
 
@@ -50,4 +50,4 @@ public:
 std::shared_ptr<Node> Sigmoid(const std::string& name = "");
 }
 
-#endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 811402420df170c011e478148cf646e6c585cc84..055e6fd1d8917ae015b88a223f1f8701fd9dce59 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -87,7 +87,7 @@ public:
 };
 
 /**
- * @brief Exract a sub-Tensor from a bigger original Tensor.
+ * @brief Extract a sub-Tensor from a bigger original Tensor.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 8c3a111c42dfeb2b4e27269839e41f3b362bdda3..5a5652388c3622bf8a46792b3c58e00c79de22f3 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -84,7 +84,7 @@ public:
 };
 
 /**
- * @brief Exract a sub-Tensor from a bigger original Tensor.
+ * @brief Extract a sub-Tensor from a bigger original Tensor.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 64a775eb4209ecad0e29decd8336ebb77bbe652f..5c966edaf27271da79f9950cdf007cfcf446dd8d 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -53,7 +53,7 @@ enum class SqueezeAttr {
  * @brief This operator has as purpose to remove dummy dimensions around given
  * axes.
  * input#0 : Tensor to squeeze
- * input#1 Optionnal : 1D tensor that lists the axes to squeeze
+ * input#1 Optional : 1D tensor that lists the axes to squeeze
  * @note the axes to squeeze can either be given via attribute or via input #1,
  * for the sake of simplicity of the example unders, the axes to squeeze are
  * given via attribute
diff --git a/include/aidge/operator/Stack.hpp b/include/aidge/operator/Stack.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..21633e451961148deda8a3f39afd1965b3002d2e
--- /dev/null
+++ b/include/aidge/operator/Stack.hpp
@@ -0,0 +1,107 @@
+#ifndef AIDGE_CORE_OPERATOR_STACK_H_
+#define AIDGE_CORE_OPERATOR_STACK_H_
+
+#include <memory>
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class StackProdConso : public ProdConso {
+  public:
+    StackProdConso(const Operator &op) : ProdConso(op) {}
+    Elts_t getRequiredMemory(
+        const IOIndex_t outputIdx,
+        const std::vector<DimSize_t> &inputsSize) const override final;
+    void resetConsummerProducer() override;
+};
+
+class StackOpImpl : public OperatorImpl {
+  public:
+    StackOpImpl(const Operator &op, const std::string &backend = "")
+        : OperatorImpl(op, backend) {}
+
+    std::shared_ptr<ProdConso> getProdConso() const override {
+        return std::make_shared<StackProdConso>(mOp);
+    }
+    void forward() override;
+};
+
+enum class StackAttr { ForwardStep, MaxElements };
+
+class StackOp
+    : public OperatorTensor,
+      public Registrable<
+          StackOp,
+          std::string,
+          std::function<std::unique_ptr<OperatorImpl>(const StackOp &)>> {
+
+  private:
+    using Attributes_ =
+        StaticAttributes<StackAttr, std::uint32_t, std::uint32_t>;
+    template <StackAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+  public:
+    static const std::string s_type;
+
+    StackOp(std::uint32_t maxElements = 0);
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output
+     * tensor(s), but not its input tensors (the new operator has no input
+     * associated).
+     * @param op Operator to copy.
+     */
+    StackOp(const StackOp &op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::StackOp
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    void setBackend(const std::string &name,
+                    DeviceIdx_t device = 0) override final;
+
+    std::set<std::string> getAvailableBackends() const override;
+
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+    void forward() override;
+
+    inline std::shared_ptr<Attributes> attributes() const override {
+        return mAttributes;
+    }
+
+    inline std::uint32_t &maxElements() const {
+        return mAttributes->template getAttr<StackAttr::MaxElements>();
+    }
+
+    inline std::uint32_t &forwardStep() const {
+        return mAttributes->template getAttr<StackAttr::ForwardStep>();
+    }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input", "max_elements"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> Stack(std::uint32_t maxElements = 0,
+                            const std::string &name = "");
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::StackAttr>::data[] = {"forward_step", "max_elements"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_STACK_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 155627f2cfd3173ccfbbe2a1ce8c23784cd06d71..c6341e934ea415cb23a7d4ce201351a0825e6081 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -25,6 +25,11 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+/**
+ * @brief Implementation of the operator Transpose.
+ * @note Since this operator implementation is agnostic to the backend it is
+ * located here instead of in aidge_backend.
+ */
 class TransposeImpl : public OperatorImpl {
 public:
     TransposeImpl(const Operator& op, const std::string& backend = "")
@@ -33,8 +38,22 @@ public:
     void forward() override;
 };
 
-enum class TransposeAttr { OutputDimsOrder };
+enum class TransposeAttr {
+  /**
+   * @brief order of the output dims from the input dims. If left empty,
+   * the dimensions of input will be reversed.
+   */
+    OutputDimsOrder
+};
 
+/**
+ * @brief This operator has as purpose to transpose the axes of a given tensor.
+ * input#0 : Tensor to transpose
+ * @example Calling transpose() on a tensor of dimensions [1, 2, 3] with OutputDimsOrder=(1,0,2) results
+ * in a tensor of dim [2, 1, 3].
+ * @example Calling transpose() on a tensor of dimensions [1,2,3,4] with an empty OutputDimsOrder vector
+ * will result in a tensor of dim [4,3,2,1].
+ */
 class Transpose_Op : public OperatorTensor,
                 public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
 
@@ -50,6 +69,10 @@ private:
 public:
     Transpose_Op() = delete;
 
+  /**
+   * @brief constructor for Transpose op
+   * @param[in] outputDimsOrder axes permutation order. By default axes are reversed.
+   */
     Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder);
 
     /**
@@ -70,6 +93,9 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    /**
+     * @brief axes new order, if left empty, axes will be reversed.
+     */
     inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); }
 
     static const std::vector<std::string> getInputsName(){
@@ -80,8 +106,8 @@ public:
     }
 };
 
-std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
-                                           const std::string& name = "");
+std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder = {},
+                                const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
diff --git a/include/aidge/recipes/GraphViewHelper.hpp b/include/aidge/recipes/GraphViewHelper.hpp
index 3b8ba7627362c945a6bfbe587ec952fdda013e98..bfd0a5364f858c7518de4be0c18888d37ab669da 100644
--- a/include/aidge/recipes/GraphViewHelper.hpp
+++ b/include/aidge/recipes/GraphViewHelper.hpp
@@ -22,11 +22,12 @@
 namespace Aidge {
 
 /**
- * @brief Getter for every Producer operator in a GraphView.
+ * @brief Getter for every Tensor held by a Producer operator in a GraphView.
  * @param graphview GraphView instance where Producers should be searched.
+ * @param constant If true, Producers with attribute ``constant=true`` are also included in the returned set, default=false
  * @return std::set<std::shared_ptr<Node>>
  */
-std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview);
+std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview, bool constant=false);
 
 
 // TODO: change for every Tensor of Operator Producer not constant
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index a9b9213e914811ccff7d1e6d8efe4fdd8a505b87..86c722b158657633d4509c1181b1f18201d0d514 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -17,7 +17,7 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include "aidge/graphRegex/matchFsm/MatchResult.hpp"
+#include "aidge/graph/Matching.hpp"
 
 
 namespace Aidge {
@@ -81,9 +81,6 @@ size_t removeIdentity(std::shared_ptr<GraphView> graph);
  */
 void removeFlatten(std::shared_ptr<Node> flatten);
 
-
-void removeFlatten(std::shared_ptr<MatchSolution> solution);
-
 /**
  * @brief Remove ``Flatten`` before :cpp:function:`Aidge::FC` Node.
  *
@@ -134,6 +131,32 @@ void explicitTranspose(std::shared_ptr<GraphView> graphView);
 */
 void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
 
+/**
+ * @brief Tile any :cpp:function:`Aidge::MatMul` operator to several fixed size matrix multiplications.
+ * For instance, for a MatMul of size 80x80 and a tiling of 16x16, this will tile 
+ * the MatMul operator to 25 (5 by 5) MatMul operators of size 16x16, with Slice
+ * operators inserted at the inputs and Concat operators inserted at the outputs.
+ * 
+ * This is especially useful when matrix multiplication must be mapped to fixed 
+ * maximum size hardware TPU (Tensor Processing Unit) or MMA (Matrix Multiplication 
+ * Accelerator). This recipe can be combined with the :cpp:function:`Aidge::convToMatMul` recipe in 
+ * order to convert convolutions to matrix multiplication beforehand, and 
+ * :cpp:function:`Aidge::constantFolding` recipe to fold sliced constant tensors.
+ * 
+ * @param matMul MatMul operator to be tiled.
+ * @param maxDims Maximum output dimensions of the tiled MatMul operators.
+ */
+void matMulTiling(NodePtr matMul, const std::vector<DimSize_t>& maxDims);
+
+/**
+ * Fuse each sub-graph matching a query in a Meta Operator.
+ * @param gm SinglePassGraphMatching containing the graph to manipulate
+ * @param query Sub-graph matching query
+ * @param type Type name of the resulting meta operators
+ * @return size_t Number of replacement
+*/
+size_t fuseToMetaOps(SinglePassGraphMatching& gm, const std::string& query, const std::string& type = "");
+
 /**
  * Fuse each sub-graph matching a query in a Meta Operator.
  * @param graph Graph to manipulate
diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp
index 2e397d1dbaa1cc8d8f586d15363cbd2245963152..880498515f36da4ecdf7f92aa7375981d5c67d10 100644
--- a/include/aidge/scheduler/MemoryManager.hpp
+++ b/include/aidge/scheduler/MemoryManager.hpp
@@ -33,7 +33,7 @@ namespace Aidge {
  * - A MemoryPlane is tailored for handling (N)HWC data with two properties:
  *   - Possibility of wrapping: on the H axis (each W*C block is contiguous).
  *   - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
- * - All the sizes and offets specified in a MemoryManager are expressed in
+ * - All the sizes and offsets specified in a MemoryManager are expressed in
  *   number of data elements, or **words**, meaning currently a uniform data 
  *   precision is expected in a MemoryManager (for instance, if the precision is
  *   16-bits, each data element will be 2 bytes, which will be the size of a word).
@@ -95,9 +95,9 @@ public:
      *   with different size, like NHWC = NHW(C1+C2):
      *   - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
      *   - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
-     *                    (with an additionnal relative offset of +C1)
+     *                    (with an additional relative offset of +C1)
      * In this mode, wrapping can only occur on the H (\p count) axis. W*C chunks
-     * are garanteed to be contiguous (\p length * \p stride).
+     * are guaranteed to be contiguous (\p length * \p stride).
      * 
      * By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
      * there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
@@ -255,7 +255,7 @@ public:
         /// with different size, like NHWC = NHW(C1+C2):
         /// - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
         /// - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
-        ///                  (with an additionnal relative offset of +C1)
+        ///                  (with an additional relative offset of +C1)
         /// By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
         /// there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
         /// In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
@@ -350,7 +350,7 @@ public:
                            unsigned int length = 1,
                            unsigned int count = 1);
     /// Generate a new MemoryPlane directly following an existing MemoryPlane
-    /// memPlane with an additionnal offset extraOffset
+    /// memPlane with an additional offset extraOffset
     MemoryPlane reallocate(const MemoryPlane& memPlane,
                            unsigned int extraOffset,
                            unsigned int size,
@@ -375,7 +375,7 @@ public:
                             unsigned int length = 1,
                             unsigned int count = 1);
     /// Generate a new MemoryPlane directly following an existing MemoryPlane
-    /// memPlane with an additionnal offset extraOffset
+    /// memPlane with an additional offset extraOffset
     unsigned int reallocate(const MemoryPlane& memPlane,
                             const std::shared_ptr<Node>& node,
                             unsigned int extraOffset,
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
index a7c0ed5ae73d1f891744e835f0da5ad14a37f850..cfc83cbf91cb7eeef2a3bbb0a4c5017a2480fe9b 100644
--- a/include/aidge/scheduler/ProdConso.hpp
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -37,21 +37,25 @@ public:
      * @brief Minimum amount of data from a specific input required by the
      * implementation to be run.
      *
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return std::size_t
      */
     virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
-    // Amount of input data that cannot be overwritten during the execution.
+    /**
+     * @brief Amount of input data that cannot be overwritten during the execution.
+     */
     virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
 
-    // Memory required at an output for a given input size.
+    /**
+     * @brief Memory required at an output for a given input size.
+     */
     virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
 
     /**
      * @brief Total amount of consumed data from a specific input.
      *
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return DimSize_t
      */
     virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
@@ -59,19 +63,19 @@ public:
     /**
      * @brief Total amount of produced data ready to be used on a specific output.
      *
-     * @param outputIdx Index of the output analysed.
+     * @param outputIdx Index of the output analyzed.
      * @return DimSize_t
      */
     virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
     /**
-     * @brief Update the Consummer Producer system by simulating the consumption and production of i/o
+     * @brief Update the Consumer Producer system by simulating the consumption and production of i/o
      *
      */
     virtual void updateConsummerProducer();
 
     /**
-     * @brief Reset the Consummer Producer system.
+     * @brief Reset the Consumer Producer system.
      *
      */
     virtual void resetConsummerProducer();
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 2d03f4e8b8d5ce9c74f1d140a2e13317decc8dac..28ecde6d9319ae05be20f591cc9a6a4e2a29acc0 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -82,6 +82,14 @@ protected:
         std::chrono::time_point<std::chrono::high_resolution_clock> end; /** Actual end time of execution */
     };
 public:
+    enum class AvailableDataStatus {
+        Connected,
+        UpperNodeInputFound,
+        UpperNodeInputConnected,
+        ValidTensor,
+        NotConnected
+    };
+
     /**
      * @struct PriorProducersConsumers
      * @brief Manages producer-consumer relationships for nodes.
@@ -179,7 +187,7 @@ protected:
      */
     std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
 
-    Elts_t getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx) const;
+    Elts_t getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx, AvailableDataStatus& status) const;
 
     /**
      * @brief Get the prior producers and consumers for a node.
@@ -190,7 +198,7 @@ protected:
 
     /**
      * @brief Generate an initial base scheduling for the GraphView.
-     * The scheduling is entirely sequential and garanteed to be valid w.r.t.
+     * The scheduling is entirely sequential and guaranteed to be valid w.r.t.
      * each node producer-consumer model.
      * @return Vector of pointers to `StaticSchedulingElement` representing the base schedule.
     */
@@ -233,4 +241,23 @@ protected:
 };
 } // namespace Aidge
 
+namespace Aidge {
+inline auto format_as(Scheduler::AvailableDataStatus status) {
+    switch (status) {
+    case Scheduler::AvailableDataStatus::Connected:
+        return "The input is connected to a Node.";
+    case Scheduler::AvailableDataStatus::UpperNodeInputFound:
+        return "The input is an upper node input, but is not connected in any GraphView.";
+    case Scheduler::AvailableDataStatus::UpperNodeInputConnected:
+        return "The input is an upper node input and is connected to a Node.";
+    case Scheduler::AvailableDataStatus::ValidTensor:
+        return "The input is not connected in the current GraphView but has a valid tensor assigned.";
+    case Scheduler::AvailableDataStatus::NotConnected:
+        return "The input is not connected in the current GraphView.";
+    default:
+        return "UNKNOWN STATUS.";
+    }
+}
+}
+
 #endif /* AIDGE_CORE_SCHEDULER_SCHEDULER_H_ */
diff --git a/include/aidge/stimuli/Stimulus.hpp b/include/aidge/stimuli/Stimulus.hpp
index 3def790b65f441c567e5d43150f465233cb64557..af21d7912314c3eea1217811ae3e2b2da47a7a66 100644
--- a/include/aidge/stimuli/Stimulus.hpp
+++ b/include/aidge/stimuli/Stimulus.hpp
@@ -23,8 +23,8 @@
 
 namespace Aidge {
 /**
- * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables to load data samples from a dataPath and optionnaly store the data in-memory. The second mode enables to store a data sample that was already loaded in memory.
- * @details When Stimulus is used in the first mode, the loading function is determined automaticaly based on the backend and the file extension.
+ * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables to load data samples from a dataPath and optionally store the data in-memory. The second mode enables to store a data sample that was already loaded in memory.
+ * @details When Stimulus is used in the first mode, the loading function is determined automatically based on the backend and the file extension.
  */
 class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::function<std::unique_ptr<StimulusImpl>(const std::string&)>> {
 private:
diff --git a/include/aidge/utils/ArrayHelpers.hpp b/include/aidge/utils/ArrayHelpers.hpp
index 6648c654d28197dc018b94e8fa300366af52db4a..45a4c3c37da59e369bae2bb7e934c54bd844088d 100644
--- a/include/aidge/utils/ArrayHelpers.hpp
+++ b/include/aidge/utils/ArrayHelpers.hpp
@@ -85,7 +85,7 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a, std::index_sequen
  * @details append({1,2,7}, 3) -> {1,2,7,3}
  *
  * @tparam T Data type.
- * @tparam N Number of elements in the initilial array.
+ * @tparam N Number of elements in the initial array.
  * @param a Initial array.
  * @param t Element to add.
  * @return constexpr std::array<T, N + 1>
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index cf71ed0b5953fa1759e04c66311d3d829a603a01..f73de2ea31681a60c85229102a9b2ebbce4d3c3e 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -69,14 +69,12 @@ public:
     virtual std::map<std::string, future_std::any> getAttrs() const = 0;
 
 #ifdef PYBIND
-    virtual bool hasAttrPy(const std::string& name) const = 0;
-
-    /* Bindable get function, does not recquire any templating.
+    /* Bindable get function, does not require any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from its return type.
     */
     virtual py::object getAttrPy(const std::string& name) const  = 0;
-    /* Bindable set function, does not recquire any templating.
+    /* Bindable set function, does not require any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from ``value`` type.
     */
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 04ed58f7e636d6a0d528f1946ead110857312576..3ecd4da393eaac9881d008e27989a52e883ecb6a 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -48,41 +48,25 @@ public:
      * \param name Attribute name
      * \details assert if T is not the actual Attribute type or if the Attribute does not
      *  exist
-     * \note at() throws if the Attribute does not exist, using find to test for Attribute existance
+     * \note at() throws if the Attribute does not exist, using find to test for Attribute existence
      */
-    template<class T> const T& getAttr(const std::string& name) const
+    template<class T> T getAttr(const std::string& name) const
     {
-        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
-            [](const future_std::any& lhs, const future_std::any& rhs) {
-#ifdef PYBIND
-                if (lhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
-                }
-                else if (rhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
-                }
-                else
-#endif
-                {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
-                }
-            }));
-
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
+            const auto& attr = mAttrs.at(name);
 #ifdef PYBIND
-            // If attribute does not exist in C++, it might have been created or modified in Python
-            auto it = mAttrs.find(name);
-            if (it == mAttrs.end()) {
-                auto itPy = mAttrsPy.find(name);
-                if (itPy != mAttrsPy.end()) {
-                    // Insert the attribute back in C++
-                    mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
-                }
+            if (attr.type() == typeid(py::object)) {
+                // Note: because of cast<T>(), this function cannot return a const reference!
+                return future_std::any_cast<const py::object&>(attr).cast<T>();
             }
+            else
 #endif
-
-            return future_std::any_cast<const T&>(mAttrs.at(name));
+            {
+                return future_std::any_cast<const T&>(attr);
+            }
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -92,9 +76,21 @@ public:
     }
 
     template<class T> T& getAttr(const std::string& name) {
-        // Scott Meyers' solution to avoid code duplication
-        return const_cast<T&>(
-            static_cast<const DynamicAttributes&>(*this).getAttr<T>(name));
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
+            auto& attr = mAttrs.at(name);
+#ifdef PYBIND
+            AIDGE_ASSERT(attr.type() != typeid(py::object), "getAttr(): cannot return a reference to a Python-defined attribute.");
+#endif
+            return future_std::any_cast<T&>(attr);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<DynamicAttributes&>(mAttrs.at(ns)).getAttr<T>(nsName);
+        }
     }
 
     ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
@@ -103,35 +99,12 @@ public:
     ///\param value Attribute value
     template<class T> void addAttr(const std::string& name, const T& value)
     {
-        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
-            [](const future_std::any& lhs, const future_std::any& rhs) {
-#ifdef PYBIND
-                if (lhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
-                }
-                else if (rhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
-                }
-                else
-#endif
-                {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
-                }
-            }));
-
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
             const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
             AIDGE_ASSERT(res.second, "addAttr(): attribute \"{}\" already exists. Use setAttr() if this is expected.", name);
-
-#ifdef PYBIND
-            // We cannot handle Python object if the Python interpreter is not running
-            if (Py_IsInitialized()) {
-                // Keep a copy of the attribute in py::object that is updated everytime
-                const auto& resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
-                AIDGE_ASSERT(resPy.second, "addAttr(): attribute \"{}\" already exists (added in Python). Use setAttr() if this is expected.", name);
-            }
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -147,37 +120,13 @@ public:
     ///\param value Attribute value
     template<class T> void setAttr(const std::string& name, const T& value)
     {
-        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
-            [](const future_std::any& lhs, const future_std::any& rhs) {
-#ifdef PYBIND
-                if (lhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
-                }
-                else if (rhs.type() == typeid(py::object)) {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
-                }
-                else
-#endif
-                {
-                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
-                }
-            }));
-
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
+
             auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
             if (!res.second)
                 res.first->second = future_std::any(value);
-
-#ifdef PYBIND
-            // We cannot handle Python object if the Python interpreter is not running
-            if (Py_IsInitialized()) {
-                // Keep a copy of the attribute in py::object that is updated everytime
-                auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
-                if (!resPy.second)
-                    resPy.first->second = std::move(py::cast(value));
-            }
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -191,9 +140,6 @@ public:
         const auto dot = name.find('.');
         if (dot == name.npos) {
             mAttrs.erase(name);
-#ifdef PYBIND
-            mAttrsPy.erase(name);
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -205,41 +151,12 @@ public:
 #ifdef PYBIND
     void addAttrPy(const std::string& name, py::object&& value)
     {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            auto it = mAttrs.find(name);
-            AIDGE_ASSERT(it == mAttrs.end(), "add_attr(): attribute \"{}\" already exists (added in C++). Use set_attr() if this is expected.", name);
-
-            const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
-            AIDGE_ASSERT(res.second, "add_attr(): attribute \"{}\" already exists. Use set_attr() if this is expected.", name);
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto nsName = name.substr(dot + 1);
-            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
-
-            future_std::any_cast<DynamicAttributes&>(res.first->second).addAttrPy(nsName, std::move(value));
-        }
+        addAttr(name, std::move(value));
     }
 
     void setAttrPy(const std::string& name, py::object&& value) override final
     {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
-            if (!resPy.second)
-                resPy.first->second = std::move(value);
-
-            // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
-            mAttrs.erase(name);
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto nsName = name.substr(dot + 1);
-            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
-
-            future_std::any_cast<DynamicAttributes&>(res.first->second).setAttrPy(nsName, std::move(value));
-        }
+        setAttr(name, std::move(value));
     }
 
     py::dict dict() const override {
@@ -248,9 +165,16 @@ public:
             if (elt.second.type() == typeid(DynamicAttributes)) {
                 attributes[elt.first.c_str()] = future_std::any_cast<const DynamicAttributes&>(elt.second).dict();
             }
-        }
-        for (const auto& elt : mAttrsPy) {
-            attributes[elt.first.c_str()] = elt.second;
+            else {
+                // At this point, not every attribute may be known to mAnyUtils
+                const auto anyUtilsIt = mAnyUtils.find(elt.second.type());
+                if (anyUtilsIt != mAnyUtils.end()) {
+                    attributes[elt.first.c_str()] = anyUtilsIt->second->cast(elt.second);
+                }
+                else {
+                    attributes[elt.first.c_str()] = "???";
+                }
+            }
         }
         return attributes;
     }
@@ -273,12 +197,7 @@ public:
     bool hasAttr(const std::string& name) const override final {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-#ifdef PYBIND
-            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
-
-#else
             return (mAttrs.find(name) != mAttrs.cend());
-#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -293,45 +212,22 @@ public:
         }
     }
 
-#ifdef PYBIND
-    bool hasAttrPy(const std::string& name) const override final {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            // Attributes might have been created in Python, the second condition is necessary.
-            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto it = mAttrs.find(ns);
-            if (it != mAttrs.cend()) {
-                const auto nsName = name.substr(dot + 1);
-                return future_std::any_cast<const DynamicAttributes&>(it->second).hasAttrPy(nsName);
-            }
-            else {
-                return false;
-            }
-        }
-    }
-#endif
-
     std::string getAttrType(const std::string& name) const override final {
         // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
         // - C-style for C++ created attributes
         // - Python-style for Python created attributes
         const auto dot = name.find('.');
         if (dot == name.npos) {
+            const auto& attr = mAttrs.at(name);
 #ifdef PYBIND
-            // If attribute does not exist in C++, it might have been created in Python
-            auto it = mAttrs.find(name);
-            if (it == mAttrs.end()) {
-                auto itPy = mAttrsPy.find(name);
-                if (itPy != mAttrsPy.end()) {
-                    return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
-                }
+            if (attr.type() == typeid(py::object)) {
+                return std::string(Py_TYPE(future_std::any_cast<const py::object&>(attr).ptr())->tp_name);
             }
+            else
 #endif
-
-            return mAttrs.at(name).type().name();
+            {
+                return attr.type().name();
+            }
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -344,33 +240,20 @@ public:
         std::set<std::string> attrsName;
         for(auto const& it: mAttrs)
             attrsName.insert(it.first);
-#ifdef PYBIND
-        // Attributes might have been created in Python
-        for(auto const& it: mAttrsPy)
-            attrsName.insert(it.first);
-#endif
         return attrsName;
     }
 
 #ifdef PYBIND
     /**
      * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a
-     * generic type caster for std::any is not feasable.
-     * The strategy here is to keep a copy of each attribute in py::object that is updated everytime.
+     * generic type caster for std::any is not feasible.
+     * The strategy here is to store a cast() function for each attribute type ever used.
     */
     inline py::object getAttrPy(const std::string& name) const override final {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-            auto itPy = mAttrsPy.find(name);
-            if (itPy == mAttrsPy.end()) {
-                // Attribute may be a namespace
-                auto it = mAttrs.find(name);
-                AIDGE_ASSERT(it != mAttrs.end() && it->second.type() == typeid(DynamicAttributes), "get_attr(): attribute \"{}\" not found", name);
-                return py::cast(future_std::any_cast<const DynamicAttributes&>(it->second));
-            }
-            else {
-                return itPy->second;
-            }
+            const auto& attr = mAttrs.at(name);
+            return mAnyUtils.at(attr.type())->cast(attr);
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -384,24 +267,6 @@ public:
     {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-#ifdef PYBIND
-            // If attribute does not exist in C++, it might have been created or modified in Python
-            auto it = mAttrs.find(name);
-            if (it == mAttrs.end()) {
-                auto itPy = mAttrsPy.find(name);
-                if (itPy != mAttrsPy.end()) {
-                    // Attribute exists in Python, but its type is not known
-                    // Return a std::any of py::object, which will be comparable
-                    mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(py::object),
-                        [](const future_std::any& lhs, const future_std::any& rhs) {
-                            return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs));
-                        }));
-
-                    return future_std::any(itPy->second);
-                }
-            }
-#endif
-
             return mAttrs.at(name);
         }
         else {
@@ -415,34 +280,129 @@ public:
         return mAttrs;
     }
 
-    virtual ~DynamicAttributes() {}
+    virtual ~DynamicAttributes() {
+#ifdef PYBIND
+        if (!Py_IsInitialized()) {
+            // Resets the internal pointer of py::object to nullptr without decreasing the object's reference count.
+            // At this point, the Python interpreter may have exited (it is the case if the current DynamicAttribute being destroyed is static),
+            // in which case py::object has already been destroyed despite the reference counting being > 0.
+            // See https://github.com/pybind/pybind11/issues/1598
+            for (auto& attr : mAttrs) {
+                if (attr.second.type() == typeid(py::object)) {
+                    future_std::any_cast<py::object&>(attr.second).release();
+                }
+            }
+        }
+#endif
+    }
 
     friend bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs);
+    friend struct std::hash<DynamicAttributes>;
 
 private:
-#ifdef PYBIND
-    // Stores C++ attributes (copy) and Python-only attributes
-    // Code should be compiled with -fvisibility=hidden
-    // See https://pybind11.readthedocs.io/en/stable/faq.html:
-    // “‘SomeClass’ declared with greater visibility than the type of its
-    // field ‘SomeClass::member’ [-Wattributes]”
-    // This map will only be populated if Python interpreter is running
-    std::map<std::string, py::object> mAttrsPy;
-    // Stores C++ attributes only
-    // mutable because it may be updated in getAttr() from Python
-    mutable std::map<std::string, future_std::any> mAttrs;
-#else
     std::map<std::string, future_std::any> mAttrs;
-#endif
 
 public:
-    // Stores the comparison function for each attribute type ever used
-    static std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> mAnyCompare;
+    struct AnyUtils_ {
+#ifdef PYBIND
+        virtual py::object cast(const future_std::any& attr) const = 0;
+#endif
+        virtual bool compare(const future_std::any&, const future_std::any&) const = 0;
+        virtual size_t hash(const future_std::any&) const = 0;
+        virtual ~AnyUtils_() = default;
+    };
+
+    template <class T>
+    struct AnyUtils : public AnyUtils_ {
+#ifdef PYBIND
+        py::object cast(const future_std::any& attr) const override final {
+            return py::cast(future_std::any_cast<const T&>(attr));
+        }
+#endif
+
+        bool compare(const future_std::any& lhs, const future_std::any& rhs) const override final {
+#ifdef PYBIND
+            if (lhs.type() == typeid(py::object) && rhs.type() != typeid(py::object)) {
+                return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+            }
+            else if (lhs.type() != typeid(py::object) && rhs.type() == typeid(py::object)) {
+                return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+            }
+            else
+#endif
+            {
+                return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+            }
+        }
+
+        size_t hash(const future_std::any& attr) const override final {
+            return std::hash<T>()(future_std::any_cast<T>(attr));
+        }
+    };
+
+    // Stores typed utils functions for each attribute type ever used
+    static std::map<std::type_index, std::unique_ptr<AnyUtils_>> mAnyUtils;
 };
 
+template<> void DynamicAttributes::setAttr<future_std::any>(const std::string& name, const future_std::any& value);
+
+#ifdef PYBIND
+template <>
+struct DynamicAttributes::AnyUtils<py::object> : public DynamicAttributes::AnyUtils_ {
+    py::object cast(const future_std::any& attr) const override {
+        return future_std::any_cast<const py::object&>(attr);
+    }
+
+    bool compare(const future_std::any& lhs, const future_std::any& rhs) const override {
+        return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs));
+    }
+
+    size_t hash(const future_std::any& attr) const override final {
+        // Here we are mixing Python and C++ hashes... if both are
+        // well implemented, this should not increase the collision 
+        // probability for the same number of stored hashes.
+        return py::hash(future_std::any_cast<py::object>(attr));
+    }
+};
+#endif
+
 inline bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs) {
     return (lhs.mAttrs < rhs.mAttrs);
 }
+
+// Combine the hashes (boost-like hash combining, see boost::hash_combine())
+inline void hash_combine(std::size_t& seed, const std::size_t& value) {
+    seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+}
+}
+
+namespace std {
+    // Make DynamicAttributes hashable so that it can be stored in hash-based containers.
+    // This is particularly useful in Python since set() and dict() are hash-based.
+    template <>
+    struct hash<Aidge::DynamicAttributes> {
+        size_t operator()(const Aidge::DynamicAttributes& attrs) const {
+            std::size_t seed = 0;
+            for (const auto& pair : attrs.mAttrs) {
+                Aidge::hash_combine(seed, std::hash<std::string>()(pair.first));
+                Aidge::hash_combine(seed, Aidge::DynamicAttributes::mAnyUtils.at(pair.second.type())->hash(pair.second));
+            }
+            return seed;
+        }
+    };
+
+    // General specialization of std::hash for any container that has iterators (e.g., std::vector, std::list, std::set)
+    template <template <typename...> class Container, typename T, typename... Args>
+    struct hash<Container<T, Args...>> {
+        std::size_t operator()(const Container<T, Args...>& iterable) const {
+            std::size_t seed = 0;
+            for (const auto& v : iterable) {
+                // Recursively hash the value pointed to by the iterator
+                Aidge::hash_combine(seed, std::hash<T>()(v));
+            }
+            return seed;
+        }
+    };
 }
 
 namespace future_std {
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index 6b2ace1c6aa013ae81e5144665e2edde830cdc54..d6851f1e42233f9d8af88d10da9046f73f94b8c4 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -9,7 +9,6 @@
  *
  ********************************************************************************/
 
-
 #ifndef AIDGE_LOG_H_
 #define AIDGE_LOG_H_
 
@@ -19,44 +18,36 @@
 #include <fmt/format.h>
 #include <fmt/ranges.h>
 
+#include "aidge/data/half_fmt.hpp"
+
 #include "aidge/utils/Attributes.hpp"
 
 namespace Aidge {
 /**
- * Helper to define a context anywhere, hidding the scoped variable name
+ * Helper to define a context anywhere, hiding the scoped variable name
  * which has no relevance.
-*/
-#define AIDGE_LOG_CONTEXT(...) const Log::Context logContext_##__LINE__(__VA_ARGS__)
-
+ */
+#define AIDGE_LOG_CONTEXT(...)                                                \
+    const Log::Context logContext_##__LINE__(__VA_ARGS__)
 
-template<class U>
-static void discard_args(U parg) {
+template <class U> static void discard_args(U parg) {
     (void)parg;
 }
-template<class U, class... Us>
-static void discard_args(U parg, Us... pargs) {
+template <class U, class... Us> static void discard_args(U parg, Us... pargs) {
     (void)parg;
     discard_args(pargs...);
 }
 
 /**
  * Aidge logging class, for displaying and file logging of events.
-*/
+ */
 class Log {
-public:
-    enum Level {
-        Debug = 0,
-        Info,
-        Notice,
-        Warn,
-        Error,
-        Fatal
-    };
+  public:
+    enum Level { Debug = 0, Info, Notice, Warn, Error, Fatal };
 
     class Context {
-    public:
-        template <typename... Args>
-        Context(Args&&... args) {
+      public:
+        template <typename... Args> Context(Args &&...args) {
             Log::mContext.push_back(fmt::format(std::forward<Args>(args)...));
         }
 
@@ -68,13 +59,12 @@ public:
     /**
      * Detailed messages for debugging purposes, providing information helpful
      * for developers to trace and identify issues.
-     * Detailed insights of what is appening in an operation, not useful for the
-     * end-user. The operation is performed nominally.
+     * Detailed insights of what is happening in an operation, not useful for
+     * the end-user. The operation is performed nominally.
      * @note This level is disabled at compile time for Release, therefore
      * inducing no runtime overhead for Release.
-    */
-    template <typename... Args>
-    static void debug(Args&&... args) {
+     */
+    template <typename... Args> static void debug(Args &&...args) {
 #ifndef NDEBUG
         // only when compiled in Debug
         log(Debug, fmt::format(std::forward<Args>(args)...));
@@ -86,22 +76,19 @@ public:
     /**
      * Messages that provide a record of the normal operation, about
      * the application's state, progress, or important events.
-     * Reports normal start, end and key steps in an operation. The operation is
-     * performed nominally.
-    */
-    template <typename... Args>
-    static void info(Args&&... args) {
+     * Reports normal start, end and key steps in an operation. The operation
+     * is performed nominally.
+     */
+    template <typename... Args> static void info(Args &&...args) {
         log(Info, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Applies to normal but significant conditions that may require monitoring,
-     * like unusual or normal fallback events.
-     * Reports specific paths in an operation. The operation can still be
-     * performed normally.
-    */
-    template <typename... Args>
-    static void notice(Args&&... args) {
+     * Applies to normal but significant conditions that may require
+     * monitoring, like unusual or normal fallback events. Reports specific
+     * paths in an operation. The operation can still be performed normally.
+     */
+    template <typename... Args> static void notice(Args &&...args) {
         log(Notice, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -110,9 +97,8 @@ public:
      * not necessarily cause immediate problems.
      * Some specific steps of the operation could not be performed, but it can
      * still provide an exploitable result.
-    */
-    template <typename... Args>
-    static void warn(Args&&... args) {
+     */
+    template <typename... Args> static void warn(Args &&...args) {
         log(Warn, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -121,26 +107,24 @@ public:
      * recover from, but attention is needed to prevent further issues.
      * The operation could not be performed, but it does not prevent potential
      * further operations.
-    */
-    template <typename... Args>
-    static void error(Args&&... args) {
+     */
+    template <typename... Args> static void error(Args &&...args) {
         log(Error, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
-     * Represents a critical error or condition that leads to the termination of
-     * the application, indicating a severe and unrecoverable problem.
-     * The operation could not be performed and any further operation is
+     * Represents a critical error or condition that leads to the termination
+     * of the application, indicating a severe and unrecoverable problem. The
+     * operation could not be performed and any further operation is
      * impossible.
-    */
-    template <typename... Args>
-    static void fatal(Args&&... args) {
+     */
+    template <typename... Args> static void fatal(Args &&...args) {
         log(Fatal, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
      * Set the minimum log level displayed in the console.
-    */
+     */
     static void setConsoleLevel(Level level) {
         mConsoleLevel = level;
     }
@@ -148,14 +132,14 @@ public:
     /**
      * Set or disable colors on console.
      * Initial value should be assumed true.
-    */
+     */
     static void setConsoleColor(bool enabled) {
         mConsoleColor = enabled;
     }
 
     /**
      * Set the minimum log level saved in the log file.
-    */
+     */
     constexpr static void setFileLevel(Level level) {
         mFileLevel = level;
     }
@@ -164,8 +148,8 @@ public:
      * Set the log file name.
      * Close the current log file and open the one with the new file name.
      * If empty, stop logging into a file.
-    */
-    static void setFileName(const std::string& fileName) {
+     */
+    static void setFileName(const std::string &fileName) {
         if (fileName != mFileName) {
             mFileName = fileName;
             mFile.release();
@@ -176,6 +160,22 @@ public:
         }
     }
 
+    /**
+     * @struct fcloseDeleter
+     * @brief Custom deleter to prevent compiler warnings when using
+     * std::unique_ptr with FILE*.
+     *
+     * Using function pointers with attributes (e.g., __attribute__((nonnull)))
+     * as deleters can trigger warnings like -Wignored-attributes. By defining a
+     * custom deleter struct, these attributes are preserved, eliminating the
+     * warnings.
+     */
+    struct fcloseDeleter {
+        void operator()(FILE *f) const noexcept {
+            std::fclose(f);
+        }
+    };
+
 private:
     static void log(Level level, const std::string& msg);
     static void initFile(const std::string& fileName);
@@ -184,14 +184,15 @@ private:
     static bool mConsoleColor;
     static Level mFileLevel;
     static std::string mFileName;
-    static std::unique_ptr<FILE, decltype(&std::fclose)> mFile;
+    static std::unique_ptr<FILE, fcloseDeleter> mFile;
     static std::vector<std::string> mContext;
 };
-}
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::Log::Level>::data[] = {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
+const char *const EnumStrings<Aidge::Log::Level>::data[] =
+    {"Debug", "Info", "Notice", "Warn", "Error", "Fatal"};
 }
 
-#endif //AIDGE_LOG_H_
+#endif // AIDGE_LOG_H_
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 0468ae2616997c306bbd475fe6eb73cc033b0bcc..28dab05f80a64f12a59dc1f684652f66a96dc95f 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -14,7 +14,7 @@
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
-#include <pybind11/stl.h> // declare_registrable key can recquire stl
+#include <pybind11/stl.h> // declare_registrable key can require stl
 #include <pybind11/functional.h>// declare_registrable allow binding of lambda fn
 
 #endif
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 414381891ce52046ee7c2df5b82a17e1314773cd..439d2c638731b40bec0696a73b62b99e3bfddd41 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -53,7 +53,7 @@ public:
 */
 
     // Constructor for Attributes initialization.
-    // Compile-time garantee that every attribute is initialized.
+    // Compile-time guarantee that every attribute is initialized.
     template <ATTRS_ENUM ...attrsEnum> // non-type attribute pack
     constexpr StaticAttributes(const attr<attrsEnum>&&... attrs) {
         // Check number of attrs consistency
@@ -188,7 +188,7 @@ public:
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
-    // Runtime existance check with name
+    // Runtime existence check with name
     bool hasAttr(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
@@ -199,18 +199,6 @@ public:
         return false;
     }
 
-#ifdef PYBIND
-        bool hasAttrPy(const std::string& name) const override final {
-        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-#endif
-
     // Runtime type access with name
     std::string getAttrType(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index 1bfe0929bf67bb0c6d3b893f3dbaf6993dcfd6ff..b5601f84c60b81d8c560b61b06c00673f51f4eee 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
  * @tparam T should correspond to the type of the tensor, define the type of the absolute and relative error
  * @param t1  first :cpp:class:`Aidge::Tensor` to test
  * @param t2  second :cpp:class:`Aidge::Tensor` to test
- * @param relative relative difference allowed (should be betwen 0 and 1)
+ * @param relative relative difference allowed (should be between 0 and 1)
  * @param absolute absolute error allowed (shoulmd be positive)
  * @return true if both tensor are approximately equal and have the datatype, shape. Else return false
  */
@@ -44,11 +44,13 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float
     }
     for(size_t i = 0; i < t1.size(); ++i){
         if (static_cast<float>(std::abs(t1.get<T1>(i) - t2.get<T2>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T2>(i)))))){
+            fmt::print("t1:\n{}\nt2\n{}\nat index {} {} != {}", t1, t2, i, t1.get<T1>(i), t2.get<T1>(i));
             return false;
         }
     }
     return true;
 }
-}
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_TENSOR_UTILS_H_s */
diff --git a/include/aidge/utilsParsing/ParsingToken.hpp b/include/aidge/utilsParsing/ParsingToken.hpp
index e303a5eabe6f7710873468f8edc8f3e844f4175f..8a1a740bf9bd4596675ef2dbd3e30af7765ffaa8 100644
--- a/include/aidge/utilsParsing/ParsingToken.hpp
+++ b/include/aidge/utilsParsing/ParsingToken.hpp
@@ -16,7 +16,7 @@ namespace Aidge{
         /**
          * @brief Token container
          * @param type one of the token type
-         * @param lexeme String representing aditional information of the token
+         * @param lexeme String representing additional information of the token
          */
         ParsingToken(const EnumType type , const std::string lexeme ):mLexeme(lexeme),mType(type){}
 
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 04172c3ff68641a9fe0d14f9a326cd17e7002912..49e45ed7e447c00cf7300e8228ee7d1b04800083 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -55,9 +55,9 @@ public:
         );
     }
 
-    std::set<ImplSpec> getAvailableImplSpecs() const noexcept override {
+    std::vector<ImplSpec> getAvailableImplSpecs() const noexcept override {
         PYBIND11_OVERRIDE_NAME(
-            std::set<ImplSpec>,
+            std::vector<ImplSpec>,
             OperatorImpl,
             "get_available_impl_specs",
             getAvailableImplSpecs
@@ -81,6 +81,13 @@ void init_OperatorImpl(py::module& m){
     .def(py::init<const DynamicAttributes&>(), py::arg("attr") = DynamicAttributes())
     .def(py::init<const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("io"), py::arg("attr") = DynamicAttributes())
     .def(py::init<const ImplSpec::IOSpec&, const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("i"), py::arg("o"), py::arg("attr") = DynamicAttributes())
+    .def("__eq__", static_cast<bool(*)(const ImplSpec&, const ImplSpec&)>(&operator==))
+    .def("__repr__", [](ImplSpec self){
+        return fmt::format("{}\n", self);
+    })
+    .def_readwrite("inputs", &ImplSpec::inputs)
+    .def_readwrite("outputs", &ImplSpec::outputs)
+    .def_readwrite("attrs", &ImplSpec::attrs)
     ;
 
     py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
@@ -98,4 +105,4 @@ void init_OperatorImpl(py::module& m){
     .def("get_available_impl_specs", &OperatorImpl_Publicist::getAvailableImplSpecs)
     ;
 }
-}
+} // namespace Aidge
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index e91f345d7974cb06aa7aec9e27300b9cf9230985..cdf8cc23250366c62a4102118a95e68cec28ec3d 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
 
 #include "aidge/data/Data.hpp"
 
@@ -62,5 +63,10 @@ void init_Data(py::module& m){
 
     py::class_<Data, std::shared_ptr<Data>>(m,"Data");
 
+
+    m.def("format_as", (const char* (*)(DataType)) &format_as, py::arg("dt"));
+    m.def("format_as", (const char* (*)(DataFormat)) &format_as, py::arg("df"));
+    m.def("get_data_format_transpose", &getDataFormatTranspose, py::arg("src"), py::arg("dst"));  
+
 }
 }
diff --git a/python_binding/data/pybind_Interpolation.cpp b/python_binding/data/pybind_Interpolation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0839d1c04925f46595630191da9291217d40f10f
--- /dev/null
+++ b/python_binding/data/pybind_Interpolation.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/data/Interpolation.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Interpolation(py::module &m) {
+    auto pyInterpolation = py::class_<Aidge::Interpolation>(m, "Interpolation");
+
+    py::enum_<Interpolation::Mode>(pyInterpolation, "Mode")
+    .value("CUBIC", Interpolation::Mode::Cubic)
+    .value("LINEAR", Interpolation::Mode::Linear)
+    .value("ROUND_PREFER_FLOOR", Interpolation::Mode::RoundPreferFloor)
+    .value("ROUND_PREFER_CEIL", Interpolation::Mode::RoundPreferCeil)
+    .value("FLOOR", Interpolation::Mode::Floor)
+    .value("CEIL", Interpolation::Mode::Ceil)
+    .export_values();
+
+    py::enum_<Interpolation::CoordinateTransformation>(pyInterpolation, "CoordinateTransformation")
+    .value("HALF_PIXEL", Interpolation::CoordinateTransformation::HalfPixel)
+    .value("HALF_PIXEL_SYMETRIC", Interpolation::CoordinateTransformation::HalfPixelSymmetric)
+    .value("PYTORCH_HALF_PIXEL", Interpolation::CoordinateTransformation::PytorchHalfPixel)
+    .value("ALIGN_CORNERS", Interpolation::CoordinateTransformation::AlignCorners)
+    .value("ASYMMETRIC", Interpolation::CoordinateTransformation::Asymmetric)
+    .export_values();
+}
+
+} // namespace Aidge
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 91f13c17dd923d9f4b9ed6e468ce7897059d8749..0d4ed716ca6c65c2e8a0153a729ebecef771ea9e 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -251,7 +251,7 @@ static T castToNativeType(const py::object val_obj) {
 }
 
 static void addScalarCtor(pyTensorClass& mTensor) {
-    // Contructor based on bare py::object in order to match either
+    // Constructor based on bare py::object in order to match either
     // python scalars (int, float) or numpy scalars (np.int32, np.int64, ...).
     // There is a merge request to support numpy scalars in pybind, through py::numpy_scalar<T>
     // though it is not merged: https://github.com/pybind/pybind11/pull/3544/.
@@ -315,27 +315,28 @@ void init_Tensor(py::module& m){
     .def(py::self - py::self)
     .def(py::self * py::self)
     .def(py::self / py::self)
+    .def("clone", &Tensor::clone)
 	.def("sqrt", &Tensor::sqrt)
     .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
+    .def("set_data_format", &Tensor::setDataFormat, py::arg("data_format"), py::arg("copyTrans") = true)
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
     .def("grad", &Tensor::grad)
     .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
+    .def("dformat", &Tensor::dataFormat)
     .def("size", &Tensor::size)
     .def("capacity", &Tensor::capacity)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>())
     .def("has_impl", &Tensor::hasImpl)
-    .def("get_coord", &Tensor::getCoord)
-    .def("get_idx", &Tensor::getIdx)
+    .def("get_coord", (std::vector<std::size_t> (Tensor::*)(const std::size_t)) &Tensor::getCoord, py::arg("flatIdx"))
+    .def("get_idx",(std::size_t (Tensor::*)(const std::vector<std::size_t> &)) &Tensor::getIdx, py::arg("coords"))
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
     .def("undefined", &Tensor::undefined)
+    .def("cpy_transpose", (void (Tensor::*)(const Tensor& src, const std::vector<DimSize_t>& transpose)) &Tensor::copyTranspose, py::arg("src"), py::arg("transpose"))
+
     .def("__str__", [](Tensor& b) {
-        if (b.empty()) {
-            return std::string("{}");
-        } else {
-            return b.toString();
-        }
+        return b.toString();
     })
     .def("__repr__", [](Tensor& b) {
         return fmt::format("Tensor(dims = {}, dtype = {})", b.dims(), std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]));
@@ -549,7 +550,7 @@ void init_Tensor(py::module& m){
     //   - np.ndarray of a given np.dtype: it will create an equivalent tensor of dtype == np.dtype when supported
     //   - np.dtype scalar: it will create an equivalent scalar tensor of dtype == np.dtype when supported
     //
-    // In order to implement this, we provide several overloads which are carefully ordered in order to fullfil
+    // In order to implement this, we provide several overloads which are carefully ordered in order to fulfill
     // the above requirements.
     //
 
diff --git a/python_binding/filler/pybind_Filler.cpp b/python_binding/filler/pybind_Filler.cpp
index a85c0d6cd6fa0367dfc26328d214c99a4288a3be..cb0a823639fd7a2f5b9f47be2bd78b9a7e41f2d4 100644
--- a/python_binding/filler/pybind_Filler.cpp
+++ b/python_binding/filler/pybind_Filler.cpp
@@ -30,11 +30,17 @@ void init_Filler(py::module &m) {
          [](std::shared_ptr<Tensor> tensor, py::object value) -> void {
              switch (tensor->dataType()) {
                  case DataType::Float64:
-                     constantFiller<double>(tensor, value.cast<double>());
+                     constantFiller<cpptype_t<DataType::Float64>>(tensor, value.cast<cpptype_t<DataType::Float64>>());
                      break;
                  case DataType::Float32:
-                     constantFiller<float>(tensor, value.cast<float>());
+                     constantFiller<cpptype_t<DataType::Float32>>(tensor, value.cast<cpptype_t<DataType::Float32>>());
                      break;
+                case DataType::Int64:
+                    constantFiller<cpptype_t<DataType::Int64>>(tensor, value.cast<cpptype_t<DataType::Int64>>());
+                    break;
+                case DataType::Int32:
+                    constantFiller<cpptype_t<DataType::Int32>>(tensor, value.cast<cpptype_t<DataType::Int32>>());
+                    break;
                  default:
                      AIDGE_THROW_OR_ABORT(
                          py::value_error,
@@ -44,14 +50,14 @@ void init_Filler(py::module &m) {
          py::arg("tensor"), py::arg("value"))
         .def(
             "normal_filler",
-            [](std::shared_ptr<Tensor> tensor, double mean,
-               double stdDev) -> void {
+            [](std::shared_ptr<Tensor> tensor, py::object mean,
+               py::object stdDev) -> void {
                 switch (tensor->dataType()) {
                     case DataType::Float64:
-                        normalFiller<double>(tensor, mean, stdDev);
+                        normalFiller<cpptype_t<DataType::Float64>>(tensor, mean.cast<cpptype_t<DataType::Float64>>(), stdDev.cast<cpptype_t<DataType::Float64>>());
                         break;
                     case DataType::Float32:
-                        normalFiller<float>(tensor, mean, stdDev);
+                        normalFiller<cpptype_t<DataType::Float32>>(tensor, mean.cast<cpptype_t<DataType::Float32>>(), stdDev.cast<cpptype_t<DataType::Float32>>());
                         break;
                     default:
                         AIDGE_THROW_OR_ABORT(
@@ -60,23 +66,39 @@ void init_Filler(py::module &m) {
                 }
             },
             py::arg("tensor"), py::arg("mean") = 0.0, py::arg("stdDev") = 1.0)
-        .def(
-            "uniform_filler",
-            [](std::shared_ptr<Tensor> tensor, double min, double max) -> void {
+        .def("uniform_filler", [] (std::shared_ptr<Tensor> tensor, py::object min, py::object max) -> void {
+            if (py::isinstance<py::int_>(min) && py::isinstance<py::int_>(max)) {
                 switch (tensor->dataType()) {
-                    case DataType::Float64:
-                        uniformFiller<double>(tensor, min, max);
+                    case DataType::Int32:
+                        uniformFiller<std::int32_t>(tensor, min.cast<std::int32_t>(), max.cast<std::int32_t>());
+                        break;
+                    case DataType::Int64:
+                        uniformFiller<std::int64_t>(tensor, min.cast<std::int64_t>(), max.cast<std::int64_t>());
+                        break;
+                    default:
+                        AIDGE_THROW_OR_ABORT(
+                            py::value_error,
+                            "Data type is not supported for Uniform filler.");
                         break;
+                }
+            } else if (py::isinstance<py::float_>(min) && py::isinstance<py::float_>(max)) {
+                switch (tensor->dataType()) {
                     case DataType::Float32:
-                        uniformFiller<float>(tensor, min, max);
+                        uniformFiller<float>(tensor, min.cast<float>(), max.cast<float>());
+                        break;
+                    case DataType::Float64:
+                        uniformFiller<double>(tensor, min.cast<double>(), max.cast<double>());
                         break;
                     default:
                         AIDGE_THROW_OR_ABORT(
                             py::value_error,
                             "Data type is not supported for Uniform filler.");
+                        break;
                 }
-            },
-            py::arg("tensor"), py::arg("min"), py::arg("max"))
+            } else {
+                AIDGE_THROW_OR_ABORT(py::value_error,"Input must be either an int or a float.");
+            }
+            }, py::arg("tensor"), py::arg("min"), py::arg("max"))
         .def(
             "xavier_uniform_filler",
             [](std::shared_ptr<Tensor> tensor, py::object scaling,
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index c0ee183b072398e2e393bdbd7446de0155519169..60d80e783d2e7d2e50d5f832b3508bf065edb707 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -30,6 +30,8 @@ void init_GraphView(py::module& m) {
           :param path: save location
           :type path: str
           )mydelimiter")
+          .def("inputs", (std::vector<std::pair<NodePtr, IOIndex_t>> (GraphView::*)() const) &GraphView::inputs)
+          .def("outputs", (std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> (GraphView::*)() const) &GraphView::outputs)
           .def("in_view", (bool (GraphView::*)(const NodePtr&) const) &GraphView::inView)
           .def("in_view", (bool (GraphView::*)(const std::string&) const) &GraphView::inView)
           .def("root_node", &GraphView::rootNode)
@@ -93,7 +95,7 @@ void init_GraphView(py::module& m) {
           :type to_other_node: Node
           :param from_out_node: Node inside the GraphView the new Node will be linked to (it will become a parent of the new Node). If the GraphView only has one output Node, then default to this Node.
           :type from_out_node: Node
-          :param from_tensor: Ouput Tensor ID of the already included Node. Default to 0.
+          :param from_tensor: Output Tensor ID of the already included Node. Default to 0.
           :type from_tensor: int
           :param to_tensor: Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning first available data input for the Node.
           :type to_tensor: int
@@ -146,6 +148,9 @@ void init_GraphView(py::module& m) {
           //                return py::none();
           //           }
           //      })
+          .def("get_ranked_nodes", &GraphView::getRankedNodes)
+          .def("get_ranked_nodes_name", &GraphView::getRankedNodesName, py::arg("format"), py::arg("mark_non_unicity") = true)
+          .def("set_dataformat", &GraphView::setDataFormat, py::arg("dataformat"))
             ;
 
      m.def("get_connected_graph_view", &getConnectedGraphView);
diff --git a/python_binding/graph/pybind_Matching.cpp b/python_binding/graph/pybind_Matching.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..af385798175afe68d2e89cd65f3645fc7b806eb3
--- /dev/null
+++ b/python_binding/graph/pybind_Matching.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/functional.h>
+#include <pybind11/stl.h>
+#include <memory>
+#include <string>
+#include "aidge/graph/Matching.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+//#include "aidge/data/Data.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+void init_SinglePassGraphMatching(py::module& m) {
+    py::class_<Aidge::SinglePassGraphMatching::MatchingResult>(m,"MatchingResult")
+        .def(py::init<>())
+        .def_readwrite("graph", &Aidge::SinglePassGraphMatching::MatchingResult::graph)
+        .def_readwrite("anchors", &Aidge::SinglePassGraphMatching::MatchingResult::anchors)
+        .def_readwrite("startNode", &Aidge::SinglePassGraphMatching::MatchingResult::startNode);
+ 
+    py::class_<Aidge::SinglePassGraphMatching>(m, "SinglePassGraphMatching") 
+        .def(py::init<std::shared_ptr<GraphView>>(), py::arg("graph"))
+        .def("match", 
+            [](Aidge::SinglePassGraphMatching& self, const std::string& query, bool disjoint){
+                // Note: Need to convert the set to a vector, as MatchingResult is not hashable and
+                // set<MatchingResult> cannot be bound
+                std::set<Aidge::SinglePassGraphMatching::MatchingResult> set_res = self.match(query, disjoint);
+                std::vector<Aidge::SinglePassGraphMatching::MatchingResult> vec_res(set_res.begin(), set_res.end());
+                return vec_res;
+            },
+            py::arg("query"), py::arg("disjoint") = false, 
+            R"mydelimiter( Matches a query by direct, single-pass parse and match.
+            :param query: The query string to search.
+            :param disjoint: If true, only keep the longest disjoint matches.
+            :return: A list of MatchingResult instances.
+            )mydelimiter")
+        .def("add_node_lambda", &SinglePassGraphMatching::addNodeLambda, py::arg("name"), py::arg("func"));
+
+}
+}  // namespace Aidge
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index d8e77bb259cbcbae7940a09dc405bb8f50b5b79b..1f67b48a08d2fbe5db39436f599ecfc0f236268c 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -34,6 +34,11 @@ void init_Node(py::module& m) {
     Type of the node.
     )mydelimiter")
 
+    .def("attributes", &Node::attributes,
+    R"mydelimiter(
+    Get attributes.
+    )mydelimiter")
+
     .def("get_operator", &Node::getOperator,
     R"mydelimiter(
     Get the Operator object of the Node.
@@ -48,7 +53,7 @@ void init_Node(py::module& m) {
     :rtype: str
     )mydelimiter")
 
-    .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), 
+    .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"),
     R"mydelimiter(
     Given a base name, generate a new name which is unique in all the GraphViews containing this node.
 
@@ -71,7 +76,7 @@ void init_Node(py::module& m) {
     :type other_node: :py:class: Node
     :param out_id: ID of the output of the current Node to connect to the other Node. (If Node has 1 output max ID is 0). Default to 0.
     :type out_id: int
-    :param other_in_id: ID of the input of the other Node to connect to the current Node (If the node is a Mul op it has 2 input then Max ID is 1).Default to the first avaible data input.
+    :param other_in_id: ID of the input of the other Node to connect to the current Node (If the node is a Mul op it has 2 input then Max ID is 1).Default to the first available data input.
     :type other_in_id: int
     )mydelimiter")
 
@@ -123,7 +128,7 @@ void init_Node(py::module& m) {
     R"mydelimiter(
     Get, for each output of the Node, a list of the children Node and the associated input index connected to it.
 
-    :return: List of a list of connections. When an outut is not linked to any child,  its list a empty.
+    :return: List of a list of connections. When an output is not linked to any child, its list is empty.
     :rtype: list[list[tuple[Node, int]]]
     )mydelimiter")
 
@@ -171,13 +176,18 @@ void init_Node(py::module& m) {
     Get children.
     )mydelimiter")
 
+    .def("get_ordered_children", &Node::getOrderedChildren,
+    R"mydelimiter(
+    Get ordered children.
+    )mydelimiter")
+
     .def("__call__",
         [](Node &self, pybind11::args args) {
             std::vector<Connector> connectors;
             for (const auto &arg : args) {
                 // Check if the argument is an instance of Connector
                 if (pybind11::isinstance<Connector>(arg)) {
-                    // Convert Python object to C++ object adn push it ot vector
+                    // Convert Python object to C++ object and push it to vector
                     connectors.push_back(arg.cast<Connector>());
                 }
                 else if (arg.is(py::none())) {
diff --git a/python_binding/graph/pybind_StaticAnalysis.cpp b/python_binding/graph/pybind_StaticAnalysis.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b7c704d722e81b36e8d4988a4503428918e16a5a
--- /dev/null
+++ b/python_binding/graph/pybind_StaticAnalysis.cpp
@@ -0,0 +1,141 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "aidge/graph/StaticAnalysis.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+/**
+ * @brief Trampoline class for binding
+ *
+ */
+class pyOperatorStats: public OperatorStats {
+public:
+    using OperatorStats::OperatorStats; // Inherit constructors
+
+    size_t getNbArithmOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbArithmOps
+        );
+    }
+
+    size_t getNbLogicOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbLogicOps
+        );
+    }
+
+    size_t getNbCompOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbCompOps
+        );
+    }
+
+    size_t getNbNLOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbNLOps
+        );
+    }
+
+    size_t getNbArithmIntOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbArithmIntOps
+        );
+    }
+
+    size_t getNbMACOps() const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            OperatorStats,
+            getNbMACOps
+        );
+    }
+};
+
+class pyStaticAnalysis: public StaticAnalysis {
+public:
+    using StaticAnalysis::StaticAnalysis; // Inherit constructors
+
+    size_t getNbParams(std::shared_ptr<Node> node) const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            StaticAnalysis,
+            getNbParams,
+            node
+        );
+    }
+
+    size_t getParamsSize(std::shared_ptr<Node> node) const override {
+        PYBIND11_OVERRIDE(
+            size_t,
+            StaticAnalysis,
+            getParamsSize,
+            node
+        );
+    }
+
+    void summary(bool incProducers) const override {
+        PYBIND11_OVERRIDE(
+            void,
+            StaticAnalysis,
+            summary,
+            incProducers
+        );
+    }
+};
+
+void init_StaticAnalysis(py::module& m){
+    py::class_<OperatorStats, std::shared_ptr<OperatorStats>, pyOperatorStats>(m, "OperatorStats", py::multiple_inheritance(), py::dynamic_attr())
+    .def(py::init<const Operator&>(), py::arg("op"))
+    .def("get_operator", &OperatorStats::getOperator)
+    .def("get_nb_arithm_ops", &OperatorStats::getNbArithmOps)
+    .def("get_nb_logic_ops", &OperatorStats::getNbLogicOps)
+    .def("get_nb_comp_ops", &OperatorStats::getNbCompOps)
+    .def("get_nb_nl_ops", &OperatorStats::getNbNLOps)
+    .def("get_nb_ops", &OperatorStats::getNbOps)
+    .def("get_nb_arithm_int_ops", &OperatorStats::getNbArithmIntOps)
+    .def("get_nb_arithm_fp_ops", &OperatorStats::getNbArithmFpOps)
+    .def("get_nb_mac_ops", &OperatorStats::getNbMACOps)
+    ;
+    declare_registrable<OperatorStats>(m, "OperatorStats");
+
+    py::class_<StaticAnalysis, std::shared_ptr<StaticAnalysis>, pyStaticAnalysis>(m, "StaticAnalysis", py::multiple_inheritance(), py::dynamic_attr())
+    .def(py::init<std::shared_ptr<GraphView>>(), py::arg("graph"))
+    .def("get_graph", &StaticAnalysis::getGraph)
+    .def("get_nb_params", &StaticAnalysis::getNbParams, py::arg("node"))
+    .def("get_params_size", &StaticAnalysis::getParamsSize, py::arg("node"))
+    .def("get_nb_arithm_ops", &StaticAnalysis::getNbArithmOps)
+    .def("get_nb_logic_ops", &StaticAnalysis::getNbLogicOps)
+    .def("get_nb_comp_ops", &StaticAnalysis::getNbCompOps)
+    .def("get_nb_nl_ops", &StaticAnalysis::getNbNLOps)
+    .def("get_nb_ops", &StaticAnalysis::getNbOps)
+    .def("get_nb_arithm_int_ops", &StaticAnalysis::getNbArithmIntOps)
+    .def("get_nb_arithm_fp_ops", &StaticAnalysis::getNbArithmFpOps)
+    .def("get_nb_mac_ops", &StaticAnalysis::getNbMACOps)
+    .def("summary", &StaticAnalysis::summary, py::arg("inc_producers") = false)
+    .def("get_op_stats", &StaticAnalysis::getOpStats, py::arg("node"))
+    ;
+}
+}
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 8a00a1cb4a419f1125411b5b1c823bf91570d62e..f8adfd5f4becb7677b3a59791f8549bb114fbbc4 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -22,14 +22,14 @@ namespace Aidge {
 
 void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
-    .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
+    .def(py::init<>())
     .def_static("get_inputs_name", &Add_Op::getInputsName)
     .def_static("get_outputs_name", &Add_Op::getOutputsName)
     .def_readonly_static("Type", &Add_Op::Type);
 
   declare_registrable<Add_Op>(m, "AddOp");
 
-  m.def("Add", &Add, py::arg("nb_inputs"), py::arg("name") = "");
+  m.def("Add", &Add, py::arg("name") = "");
 }
 
 void init_Add(py::module &m) {
diff --git a/python_binding/operator/pybind_Atan.cpp b/python_binding/operator/pybind_Atan.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e9e277fcc336b8d4436aaf8dc1a834e666e400ae
--- /dev/null
+++ b/python_binding/operator/pybind_Atan.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Atan.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Atan(py::module& m) {
+    py::class_<Atan_Op, std::shared_ptr<Atan_Op>, OperatorTensor>(m, "AtanOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Atan_Op::getInputsName)
+    .def_static("get_outputs_name", &Atan_Op::getOutputsName);
+
+    declare_registrable<Atan_Op>(m, "AtanOp");
+
+    m.def("Atan", &Atan, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index b98a642111402050fd3cba6dd8a12b11a3bbde8a..2d137c4de0c62ff1f8391b03cb07bc9230b7c9eb 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -28,7 +28,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
-  const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("AvgPooling" + std::to_string(DIM) + "DOp");
   const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName);
 //   py::class_<StaticAttributes<AvgPoolingAttr,
 //                                              std::array<DimSize_t, DIM>,
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 9a1bdacd169beebc843448d23bdaf8502de437b4..c380f594038fe7f77d1c01a4e9c8285ccf5ad85a 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -23,19 +23,20 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
+    const std::string pyClassName("BatchNorm" + std::to_string(DIM) + "DOp");
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(), py::multiple_inheritance())
-        .def(py::init<float, float>(),
+        .def(py::init<float, float, bool>(),
             py::arg("epsilon"),
-            py::arg("momentum"))
+            py::arg("momentum"),
+            py::arg("training_mode"))
         .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
         .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type);
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
-    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nb_features"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
+    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nb_features"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("training_mode") = false, py::arg("name") = "");
 }
 
 void init_BatchNorm(py::module &m) {
diff --git a/python_binding/operator/pybind_Cast.cpp b/python_binding/operator/pybind_Cast.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..960a084ff063e6310a4526dd65e8dabb0e8f905a
--- /dev/null
+++ b/python_binding/operator/pybind_Cast.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include <string>
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Cast.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Cast(py::module &m) {
+    // Binding for CastOp class
+    auto pyCastOp = py::class_<Cast_Op, std::shared_ptr<Cast_Op>, OperatorTensor>(m, "CastOp", py::multiple_inheritance(),R"mydelimiter(
+        CastOp is a tensor operator that casts the input tensor to a data type specified by the target_type argument.
+        :param target_type: data type of the output tensor 
+        :type target_type: Datatype
+        :param name: name of the node.
+    )mydelimiter")
+        .def(py::init<DataType>(), py::arg("target_type"))
+        .def("target_type", &Cast_Op::targetType, "Get the targeted type, output tensor data type")
+        .def_static("get_inputs_name", &Cast_Op::getInputsName, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", &Cast_Op::getOutputsName, "Get the names of the output tensors.");
+
+    // Binding for the Cast function
+    m.def("Cast", &Cast, py::arg("target_type"), py::arg("name") = "",
+        R"mydelimiter(
+        CastOp is a tensor operator that casts the input tensor to a data type specified by the target_type argument.
+        :param target_type: data type of the output tensor 
+        :type target_type: Datatype
+        :param name: name of the node.
+    )mydelimiter");
+}
+} // namespace Aidge
\ No newline at end of file
diff --git a/python_binding/operator/pybind_Clip.cpp b/python_binding/operator/pybind_Clip.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..27c47811a3c192f1f657f339fe4caf047a944224
--- /dev/null
+++ b/python_binding/operator/pybind_Clip.cpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Clip.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Clip(py::module& m) {
+    py::class_<Clip_Op, std::shared_ptr<Clip_Op>, OperatorTensor>(m, "ClipOp", py::multiple_inheritance())
+    .def(py::init<float, float>(), py::arg("min") = std::numeric_limits<float>::lowest(), py::arg("max") = std::numeric_limits<float>::max())
+    .def_static("get_inputs_name", &Clip_Op::getInputsName)
+    .def_static("get_outputs_name", &Clip_Op::getOutputsName)
+    .def("min",&Clip_Op::min,py::return_value_policy::reference_internal)
+    .def("max",&Clip_Op::max,py::return_value_policy::reference_internal);
+    
+    
+    declare_registrable<Clip_Op>(m, "ClipOp");
+   
+    m.def("Clip", &Clip,py::arg("name") = "",
+    py::arg("min")= std::numeric_limits<float>::lowest(),
+    py::arg("max")= std::numeric_limits<float>::max(),
+    R"mydelimiter(ClipOp is a tensor operator that performs a clipping operation on tensor elements.
+        This class allows limiting tensor values to a specified range, defined by the min 
+        and max parameters. Values outside this range are replaced by the corresponding 
+        limit values. When 'min' is greater than 'max', the clip operator sets all the 'input' values to the value of 'max'.
+        :param min: minimum clipping value.
+        :type min: float
+        :param max: maximum clipping value.
+        :type max: float
+        :param name: name of the node.
+    )mydelimiter");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index b0d5ef2ef78380422ca1a137608f5289fa519aed..189337a384d55c91f6aceeb97c530ed92ef7b4d0 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -24,7 +24,7 @@ namespace Aidge {
 void init_ConstantOfShape(py::module &m) {
   py::class_<ConstantOfShape_Op, std::shared_ptr<ConstantOfShape_Op>, OperatorTensor>(
       m, "ConstantOfShapeOp", py::multiple_inheritance())
-      // Here we bind the methods of the Unsqueeze_Op that wil want to access
+      // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
       .def("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
       .def("value", &ConstantOfShape_Op::value);
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index bc72825b2161d8733334817e095c251c788e7eba..3bd54da74f90a38b0255e021234786cd21e6c8a3 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("Conv" + std::to_string(DIM) + "DOp");
   py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 377d0fca5d78dff20b8df0cc0d5521eb9a3685a2..b69fee02a50b95e4abfda895580f3192e7876766 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -27,7 +27,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("ConvDepthWise" + std::to_string(DIM) + "DOp");
   py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
diff --git a/python_binding/operator/pybind_DepthToSpace.cpp b/python_binding/operator/pybind_DepthToSpace.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..efb8a7406774a5b071e8ebc3bda69d6ec773b50a
--- /dev/null
+++ b/python_binding/operator/pybind_DepthToSpace.cpp
@@ -0,0 +1,59 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/DepthToSpace.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Types.h"
+
+static typename Aidge::DepthToSpace_Op::Mode stringToMode(const std::string& mode) {
+static std::unordered_map<std::string, typename Aidge::DepthToSpace_Op::Mode> map = {
+    {"DCR", Aidge::DepthToSpace_Op::Mode::DCR},
+    {"CRD", Aidge::DepthToSpace_Op::Mode::CRD}
+};
+return map[mode];
+}
+
+namespace py = pybind11;
+namespace Aidge {
+
+void declare_DepthToSpace(py::module &m) {
+
+    py::class_<DepthToSpace_Op, std::shared_ptr<DepthToSpace_Op>, OperatorTensor> (m, "DepthToSpaceOp", py::multiple_inheritance())
+    .def(py::init([](const std::uint32_t blockSize, const std::string& mode) {
+            return new DepthToSpace_Op(blockSize, stringToMode(mode));
+        }), py::arg("block_size"), py::arg("mode") = "CRD")
+    .def_static("get_inputs_name", &DepthToSpace_Op::getInputsName)
+    .def_static("get_outputs_name", &DepthToSpace_Op::getOutputsName)
+    .def_readonly_static("Type", &DepthToSpace_Op::Type)
+    .def("__repr__", [](DepthToSpace_Op& b) {
+        return fmt::format("Operator(type='{}')", b.Type);
+    });
+
+  declare_registrable<DepthToSpace_Op>(m, "DepthToSpaceOp");
+
+  m.def("DepthToSpace", [](
+            const std::uint32_t blockSize,
+            const std::string& mode,
+            const std::string& name) {
+        return DepthToSpace(blockSize, stringToMode(mode), name);
+    }, py::arg("block_size"), py::arg("mode") = "CRD", py::arg("name") = "");
+}
+
+void init_DepthToSpace(py::module &m) {
+  declare_DepthToSpace(m);
+}
+
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 6af8fef88e411af0a3ecbe5a771bf7af24de411a..f125291fafb89ec7ae81678a37e2bde2222a1054 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -64,5 +64,7 @@ void init_GenericOperator(py::module& m) {
             }
             return genericNode;
         }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
+
+    declare_registrable<GenericOperator_Op>(m, "GenericOperatorOp");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 6d6c03b82ad4f905c41bb0cf849fc4e05fda4cb2..69454c2abfa56a16dbc3f3638d68e98ce93d2538 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -55,7 +55,7 @@ void declare_GridSampleOp(py::module &m) {
             return new GridSample_Op(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners);
         }), py::arg("mode") = "linear",
             py::arg("padding_mode") = "zeros",
-            py::arg("alogn_corners") = false)
+            py::arg("align_corners") = false)
         .def_static("get_inputs_name", &GridSample_Op::getInputsName)
         .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
         .def_readonly_static("Type", &GridSample_Op::Type)
diff --git a/python_binding/operator/pybind_Heaviside.cpp b/python_binding/operator/pybind_Heaviside.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cbc2502aac018927c544a57f343a6305ee2bd86f
--- /dev/null
+++ b/python_binding/operator/pybind_Heaviside.cpp
@@ -0,0 +1,56 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Heaviside.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+
+namespace Aidge {
+
+void init_Heaviside(py::module &m) {
+    py::class_<Heaviside_Op, std::shared_ptr<Heaviside_Op>, OperatorTensor>(
+        m,
+        "HeavisideOp",
+        py::multiple_inheritance(),
+         R"mydelimiter(
+          Initialize a Heaviside node. This node will compute a Heaviside step function
+          on each element of the input tensor.
+          heaviside(input, value) = { 0     if input < 0
+                                    { value if input == 0
+                                    { 1     if input > 0
+
+          :param value : The value used for the output tensor when input is 0.
+          :type value : float
+          :param name : Name of the node.
+          )mydelimiter")
+        .def(py::init<float>(), py::arg("value"))
+        .def_static("get_inputs_name", &Heaviside_Op::getInputsName)
+        .def_static("get_outputs_name", &Heaviside_Op::getOutputsName)
+        .def_readonly_static("Type", &Heaviside_Op::Type);
+
+    declare_registrable<Heaviside_Op>(m, "HeavisideOp");
+    m.def("Heaviside", &Heaviside, py::arg("value"), py::arg("name") = "",
+            R"mydelimiter(
+          Initialize a Heaviside node. This node will compute a Heaviside step function
+          on each element of the input tensor.
+          heaviside(input, value) = { 0     if input < 0
+                                    { value if input == 0
+                                    { 1     if input > 0
+
+          :param value : The value used for the output tensor when input is 0.
+          :type value : float
+          :param name : Name of the node.
+          )mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_LRN.cpp b/python_binding/operator/pybind_LRN.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..178af540b3efb280eaba67572e07cb673e563b22
--- /dev/null
+++ b/python_binding/operator/pybind_LRN.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/LRN.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_LRN(py::module& m) {
+    py::class_<LRN_Op, std::shared_ptr<LRN_Op>, OperatorTensor>(m, "LRNOp", py::multiple_inheritance())
+        .def(py::init<std::int32_t>(), py::arg("size"))
+        .def_static("get_inputs_name", &LRN_Op::getInputsName)
+        .def_static("get_outputs_name", &LRN_Op::getOutputsName)
+        .def_readonly_static("Type", &LRN_Op::Type);
+    declare_registrable<LRN_Op>(m, "LRNOp");
+    m.def("LRN", &LRN, py::arg("size"), py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index b59a4c5574ce5e56af13f9aea13e7514c9402c22..00f6d26bd3ae49b26d72236b71372c1f557ddbdd 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -26,9 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("MaxPooling" + std::to_string(DIM) + "DOp");
   py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor>(
-    m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index d021a79c5ff4e337bebf424465458ddabf056a56..5f173068af0f1140830d458979ec924c38ade078 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -47,7 +47,7 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("no_bias")= false);
-    m.def(("PaddedConvOp" + std::to_string(DIM) + "D").c_str(), [](
+    m.def(("PaddedConv" + std::to_string(DIM) + "DOp").c_str(), [](
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
@@ -87,7 +87,7 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("no_bias") = false);
-  m.def(("PaddedConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(), [](
+  m.def(("PaddedConvDepthWise" + std::to_string(DIM) + "DOp").c_str(), [](
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
@@ -121,7 +121,7 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0));
-  m.def(("PaddedAvgPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("PaddedAvgPooling" + std::to_string(DIM) + "DOp").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims)
     {
@@ -152,7 +152,7 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
        py::arg("ceil_mode") = false);
-  m.def(("PaddedMaxPoolingOp" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
+  m.def(("PaddedMaxPooling" + std::to_string(DIM) + "DOp").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          bool ceil_mode)
@@ -194,15 +194,20 @@ void init_MetaOperatorDefs(py::module &m) {
 //   declare_PaddedMaxPoolingOp<3>(m);
   declare_LSTMOp(m);
 
-  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperator_Op", py::multiple_inheritance())
-  .def(py::init<const char *, const std::shared_ptr<GraphView>&>(),
+  py::class_<MetaOperator_Op, std::shared_ptr<MetaOperator_Op>, OperatorTensor>(m, "MetaOperatorOp", py::multiple_inheritance())
+  .def(py::init<const char *, const std::shared_ptr<GraphView>&, const std::vector<InputCategory>&>(),
           py::arg("type"),
-          py::arg("graph"))
-  .def("get_micro_graph", &MetaOperator_Op::getMicroGraph);
+          py::arg("graph"),
+          py::arg("forced_inputs_category") = std::vector<InputCategory>())
+  .def("get_micro_graph", &MetaOperator_Op::getMicroGraph)
+  .def("set_upper_node", &MetaOperator_Op::setUpperNode);
+
+  declare_registrable<MetaOperator_Op>(m, "MetaOperatorOp");
 
   m.def("meta_operator", &MetaOperator,
     py::arg("type"),
     py::arg("graph"),
+    py::arg("forced_inputs_category") = std::vector<InputCategory>(),
     py::arg("name") = ""
   );
 
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 6ffbdd007b9f929ccac18de12f2319dcd68b1eda..7fa9e5825983eb0c82d2b1f84b77557e656a7d78 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -37,6 +37,7 @@ void init_Operator(py::module& m){
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
     .def("__repr__", &Operator::repr)
     .def("backend", &Operator::backend)
+    .def("clone", &Operator::clone)
     .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
@@ -60,11 +61,10 @@ void init_Operator(py::module& m){
     .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
     .def("type", &Operator::type)
     .def("get_impl", &Operator::getImpl)
-    .def("get_hook", &Operator::getHook)
-    .def("add_hook", &Operator::addHook)
     .def_property_readonly("attr", &Operator::attributes)
     .def("set_back_edges", &Operator::setBackEdges, py::arg("input_indexes"))
     .def("is_back_edge", &Operator::isBackEdge, py::arg("input_index"))
+    .def("is_atomic", &Operator::isAtomic)
     ;
 }
 }
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 04882b7f5b86c7c09ed8b8e5a15c4bfabd03bb55..7dc4a4bee1a009b3ca033ea29861768c1a6fc19d 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -25,12 +25,12 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
-  const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
+  const std::string pyClassName("Pad" + std::to_string(DIM) + "DOp");
   py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
-                const PadBorderType &,
+                PadBorderType,
                 double>(),
         py::arg("beginEndTuples"),
         py::arg("borderType") = PadBorderType::Constant,
@@ -42,7 +42,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                         const std::string& name,
-                                                        const PadBorderType &borderType = PadBorderType::Constant,
+                                                        PadBorderType borderType = PadBorderType::Constant,
                                                         double borderValue = 0.0) {
         AIDGE_ASSERT(beginEndTuples.size() == 2*DIM, "begin_end_tuples size [{}] does not match DIM [{}]", beginEndTuples.size(), 2*DIM);
         return Pad<DIM>(to_array<2*DIM>(beginEndTuples.begin()), name, borderType, borderValue);
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 35321f525e486107af3715ce1c09f48b7c5cd60f..2aa62609835a7042dd0df54f28b453b7e33a3b5b 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -9,22 +9,51 @@
  *
  ********************************************************************************/
 
+#include <cstddef>  // std::size_t
+
 #include <pybind11/pybind11.h>
 
-#include "aidge/operator/Resize.hpp"
+#include "aidge/data/Interpolation.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Resize.hpp"
+#include "aidge/utils/Registrar.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
-void init_Resize(py::module& m) {
-    py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
-        .def_static("get_inputs_name", &Resize_Op::getInputsName)
-        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
-        .def_readonly_static("Type", &Resize_Op::Type);
+void init_Resize(py::module &m) {
+  py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(
+          m, "ResizeOp", py::multiple_inheritance())
+          .def(py::init<Interpolation::CoordinateTransformation, Interpolation::Mode, float, PadBorderType>(), py::arg("coordinate_transformation_mode"), py::arg("interpolation_mode"), py::arg("cubic_coeff_a") = -0.75f, py::arg("padding_mode") = PadBorderType::Edge)
+          .def_static("get_inputs_name", &Resize_Op::getInputsName)
+          .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+          .def_readonly_static("Type", &Resize_Op::Type);
 
-    declare_registrable<Resize_Op>(m, "ResizeOp");
+  declare_registrable<Resize_Op>(m, "ResizeOp");
 
-    m.def("Resize", &Resize, py::arg("name") = "");
+  m.def("Resize", &Resize,
+        py::arg("scale") = std::vector<float>({}),
+        py::arg("size") = std::vector<std::size_t>({}),
+        py::arg("coord_transfo_mode") =
+            Interpolation::CoordinateTransformation::HalfPixel,
+        py::arg("interpolation_mode") =
+            Interpolation::Mode::RoundPreferFloor,
+        py::arg("cubic_interpolation_coefficient_a") = -.75f,
+        py::arg("name") = "", R"mydelimiter(
+    Initialize a node containing a Resize operator.
+    This node can take 4 different inputs.
+    #0 Input to resize
+    #1 ROI NOT SUPPORTED (optional) - Tensor(double|float|float16)
+    #2 scales (optional) - tensor(float)
+    #3 sizes (optional) - tensor(int64)
+    :param coord_transfo_mode : Coordinate transformation mode used to map output coordinates to input coordinates.
+    :type coord_transfo_mode : Interpolation::CoordinateTransformation
+    :param interpolation_mode : Type of interpolation used in case of upsampling.
+    :type interpolation_mode : Interpolation::Mode
+    :param cubic_interpolation_coefficient_a : "A" coefficient of cubic interpolation. Only used if interpolation_mode = Interpolation::Mode::Cubic. :type: float
+    :param name : name of the node.
+    :type name : str
+    )mydelimiter");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Round.cpp b/python_binding/operator/pybind_Round.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e9ed0e473eaa820537590633a89ca47382d36672
--- /dev/null
+++ b/python_binding/operator/pybind_Round.cpp
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Round.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Round(py::module& m) {
+    py::class_<Round_Op, std::shared_ptr<Round_Op>, OperatorTensor>(m, "RoundOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Round_Op::getInputsName)
+    .def_static("get_outputs_name", &Round_Op::getOutputsName)
+    .def_readonly_static("Type", &Round_Op::Type);
+    declare_registrable<Round_Op>(m, "RoundOp");
+    m.def("Round", &Round, py::arg("name") = "", R"mydelimiter(
+    RoundOp is a tensor operator that rounds the values of a tensor element-wise.
+        This class rounds each value to the nearest integer. In the case of halves, 
+        the rule is to round them to the nearest even integer.
+        :param X: input tensor.
+        :type X: tensor of type float, double, float16, or bfloat16.
+        :param Y: output tensor with the same shape and type as the input tensor.
+    )mydelimiter");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index db7fc7bfb60ff8360933e5f84ab54d4cec8df724..09e0e2fa2c1c46bafcd253267d5c33187de6bd69 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -25,6 +25,8 @@ void init_Sigmoid(py::module& m) {
     .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName)
     .def_readonly_static("Type", &Sigmoid_Op::Type);
 
+    declare_registrable<Sigmoid_Op>(m, "SigmoidOp");
+
     m.def("Sigmoid", &Sigmoid, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Stack.cpp b/python_binding/operator/pybind_Stack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c9bd969faf714cacb0dbf44a0b0fe6e84281ffd8
--- /dev/null
+++ b/python_binding/operator/pybind_Stack.cpp
@@ -0,0 +1,41 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Stack.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+void init_Stack(py::module &m) {
+    py::class_<StackOp, std::shared_ptr<StackOp>, OperatorTensor>(
+        m,
+        "StackOp",
+        py::multiple_inheritance(),
+        R"mydelimiter(Initialize a Stack operator.)mydelimiter")
+        .def(py::init<const std::uint32_t>(), py::arg("max_elements"))
+        .def_static("get_inputs_name", &StackOp::getInputsName)
+        .def_static("get_outputs_name", &StackOp::getOutputsName)
+        .def_readonly_static("Type", &StackOp::s_type);
+
+    m.def("Stack",
+          &Stack,
+          py::arg("max_elements") = 0,
+          py::arg("name") = "",
+          R"mydelimiter(
+        Initialize a node containing a Stack operator.
+            :param max_elements : the maximum number of tensors to be stacked.
+			:param name: name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index 930dd95f3c3e4b10d2b4f8b496dfbbbcc6822050..20794a15585529e10a83d2bf6fa7c18edfbde3fa 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -28,13 +28,26 @@ namespace Aidge {
 void declare_Transpose(py::module &m) {
   const std::string pyClassName("TransposeOp");
   py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, OperatorTensor>(
-    m, "TransposeOp", py::multiple_inheritance())
-    .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
+    m, "TransposeOp", py::multiple_inheritance(),
+      R"mydelimiter(
+		      Initialize transpose operator
+		          :param output_dims_order : axes permutation order, must be of rank = r and values between [0;r-1]
+					with r = input_tensor.nbDims()
+		:type output_dims_order : :py:class: List[Int]
+		)mydelimiter")
+    .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order")=std::vector<std::size_t>())
     .def_static("get_inputs_name", &Transpose_Op::getInputsName)
     .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
     .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
-  m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
+  m.def("Transpose", &Transpose, py::arg("output_dims_order")=std::vector<std::size_t>(), py::arg("name") = "",
+  R"mydelimiter(
+    Initialize a node containing a transpose operator.
+	:param output_dims_order : axes permutation order, must be of rank = r and values between [0;r-1]
+					with r = input_tensor.nbDims()
+	:type output_dims_order : :py:class: List[Int]
+    :param name : name of the node.
+)mydelimiter");
 }
 
 void init_Transpose(py::module &m) {
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index 40c179c4064f07896113732a7e3c32db5f19c060..b61cb40cedbb5bfbc197c401454f205c737bc6ee 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -28,7 +28,7 @@ void init_Unsqueeze(py::module &m) {
 						with r = input_tensor.nbDims() + len(axes)
 		:type axes : :py:class: List[Int]
 		)mydelimiter")
-      // Here we bind the methods of the Unsqueeze_Op that wil want to access
+      // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
       .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
       .def("axes", &Unsqueeze_Op::axes);
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 52c8cc8a0199ac64b0f7bae97442178614ea5622..006eeb289f25570ddf337f048b05816102624028 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -20,6 +20,7 @@ void init_Random(py::module&);
 void init_Data(py::module&);
 void init_Database(py::module&);
 void init_DataProvider(py::module&);
+void init_Interpolation(py::module&);
 void init_Tensor(py::module&);
 void init_TensorImpl(py::module&);
 void init_Attributes(py::module&);
@@ -27,17 +28,22 @@ void init_OperatorImpl(py::module&);
 void init_Log(py::module&);
 void init_Operator(py::module&);
 void init_OperatorTensor(py::module&);
+void init_StaticAnalysis(py::module&);
 
 void init_Add(py::module&);
 void init_And(py::module&);
 void init_ArgMax(py::module&);
+void init_Atan(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
 void init_BitShift(py::module&);
+void init_Cast(py::module&);
+void init_Clip(py::module&);
 void init_Concat(py::module&);
 void init_ConstantOfShape(py::module&);
 void init_Conv(py::module&);
 void init_ConvDepthWise(py::module&);
+void init_DepthToSpace(py::module&);
 void init_Div(py::module&);
 void init_Erf(py::module&);
 void init_FC(py::module&);
@@ -45,7 +51,9 @@ void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_GlobalAveragePooling(py::module&);
 void init_GridSample(py::module&);
+void init_Heaviside(py::module&);
 void init_Identity(py::module&);
+void init_LRN(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
@@ -61,6 +69,7 @@ void init_ReduceMean(py::module&);
 void init_ReduceSum(py::module&);
 void init_Reshape(py::module&);
 void init_Resize(py::module&);
+void init_Round(py::module&);
 void init_Scaling(py::module&);
 void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
@@ -69,6 +78,7 @@ void init_Softmax(py::module&);
 void init_Split(py::module&);
 void init_Sqrt(py::module&);
 void init_Squeeze(py::module&);
+void init_Stack(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
 void init_Transpose(py::module&);
@@ -78,6 +88,7 @@ void init_Node(py::module&);
 void init_GraphView(py::module&);
 void init_OpArgs(py::module&);
 void init_Connector(py::module&);
+void init_SinglePassGraphMatching(py::module&);
 
 void init_GraphRegex(py::module&);
 void init_MatchSolution(py::module&);
@@ -97,6 +108,7 @@ void init_Aidge(py::module& m) {
     init_Data(m);
     init_Database(m);
     init_DataProvider(m);
+    init_Interpolation(m);
     init_Tensor(m);
     init_TensorImpl(m);
     init_Attributes(m);
@@ -105,22 +117,28 @@ void init_Aidge(py::module& m) {
     init_GraphView(m);
     init_OpArgs(m);
     init_Connector(m);
+    init_SinglePassGraphMatching(m);
 
     init_OperatorImpl(m);
     init_Log(m);
     init_Operator(m);
     init_OperatorTensor(m);
+    init_StaticAnalysis(m);
 
     init_Add(m);
     init_And(m);
     init_ArgMax(m);
+    init_Atan(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
     init_BitShift(m);
+    init_Cast(m);
+    init_Clip(m);
     init_Concat(m);
     init_Conv(m);
     init_ConvDepthWise(m);
     init_ConstantOfShape(m);
+    init_DepthToSpace(m);
     init_Div(m);
     init_Erf(m);
     init_FC(m);
@@ -128,7 +146,9 @@ void init_Aidge(py::module& m) {
     init_GenericOperator(m);
     init_GlobalAveragePooling(m);
     init_GridSample(m);
+    init_Heaviside(m);
     init_Identity(m);
+    init_LRN(m);
     init_LeakyReLU(m);
     init_MatMul(m);
     init_MaxPooling(m);
@@ -143,6 +163,7 @@ void init_Aidge(py::module& m) {
     init_ReduceSum(m);
     init_Reshape(m);
     init_Resize(m);
+    init_Round(m);
     init_Scaling(m);
     init_Shape(m);
     init_Sigmoid(m);
@@ -151,6 +172,7 @@ void init_Aidge(py::module& m) {
     init_Split(m);
     init_Sqrt(m);
     init_Squeeze(m);
+    init_Stack(m);
     init_Sub(m);
     init_Tanh(m);
     init_Transpose(m);
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp
index ac56fb4b43eb5b0a737157ec9e64c6771a692816..eee5b6a61771b4b0724699f1c45dad6c8a35f04d 100644
--- a/python_binding/recipes/pybind_GraphViewHelper.cpp
+++ b/python_binding/recipes/pybind_GraphViewHelper.cpp
@@ -23,6 +23,6 @@ namespace py = pybind11;
 
 namespace Aidge {
 void init_GraphViewHelper(py::module &m) {
-    m.def("producers", &producers, py::arg("graphview"));
+    m.def("producers", &producers, py::arg("graphview"), py::arg("constant")=false);
 }
 } // namespace Aidge
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index 6908cbd912b506a7adb7f33a02416d0173174969..77f20b9d655c6d9f6e95b23c4884bd1bc4f9ffd6 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -112,7 +112,20 @@ void init_Recipes(py::module &m)
     :type recursive: bool
     )mydelimiter");
 
-  m.def("fuse_to_metaops", fuseToMetaOps, py::arg("graph_view"), py::arg("query"), py::arg("type") = "", R"mydelimiter(
+  m.def("fuse_to_metaops", py::overload_cast<SinglePassGraphMatching&, const std::string&, const std::string&>(fuseToMetaOps), py::arg("gm"), py::arg("query"), py::arg("type") = "", R"mydelimiter(
+    Fuse each sub-graph matching a query in a Meta Operator.
+
+    :param gm: SinglePassGraphMatching containing the graph to manipulate
+    :type gm: :py:class:`aidge_core.SinglePassGraphMatching`
+    :param query: Sub-graph matching query
+    :type query: str
+    :param type: Type name of the resulting meta operators
+    :type type: str, optional
+    :return: Number of sub-graph actually fused in a Meta Operator.
+    :rtype: int
+    )mydelimiter");
+
+  m.def("fuse_to_metaops", py::overload_cast<std::shared_ptr<GraphView>, const std::string&, const std::string&>(fuseToMetaOps), py::arg("graph_view"), py::arg("query"), py::arg("type") = "", R"mydelimiter(
     Fuse each sub-graph matching a query in a Meta Operator.
 
     :param graph_view: Graph view on which we want to apply the recipe
diff --git a/python_binding/utils/pybind_Attributes.cpp b/python_binding/utils/pybind_Attributes.cpp
index bc0ccb3f4053e37c186acd919fcadae9d5d19a40..691691a86861b7a5ac731a842d8902d3520d4a23 100644
--- a/python_binding/utils/pybind_Attributes.cpp
+++ b/python_binding/utils/pybind_Attributes.cpp
@@ -30,13 +30,13 @@ DynamicAttributes test_DynamicAttributes_binding() {
     return attrs;
 }
 
-double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
+double test_DynamicAttributes_binding_check(const DynamicAttributes& attrs) {
     return attrs.getAttr<double>("d");
 }
 
 void init_Attributes(py::module& m){
     py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
-    .def("has_attr", &Attributes::hasAttrPy, py::arg("name"))
+    .def("has_attr", &Attributes::hasAttr, py::arg("name"))
     .def("get_attr", &Attributes::getAttrPy, py::arg("name"))
     .def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
     .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
index ca8d1f33086fb5093c76826e5a2f53df873badf5..aa42c6605217f63b6871d1a3475b9612097577cd 100644
--- a/python_binding/utils/pybind_Log.cpp
+++ b/python_binding/utils/pybind_Log.cpp
@@ -17,7 +17,7 @@ void init_Log(py::module& m){
           R"mydelimiter(
           Detailed messages for debugging purposes, providing information helpful
           for developers to trace and identify issues.
-          Detailed insights of what is appening in an operation, not useful for the
+          Detailed insights of what is happening in an operation, not useful for the
           end-user. The operation is performed nominally.
           Note: This level is disabled at compile time for Release, therefore
           inducing no runtime overhead for Release.
diff --git a/python_binding/utils/pybind_TensorUtils.cpp b/python_binding/utils/pybind_TensorUtils.cpp
index d82db0355ad641062ec89b1b331c74ccfde4c0b6..15fabdcb700ed5ca15d3d60952f55df488d41bc3 100644
--- a/python_binding/utils/pybind_TensorUtils.cpp
+++ b/python_binding/utils/pybind_TensorUtils.cpp
@@ -41,7 +41,7 @@ void addTensorUtilsFunction(py::module &m){
         :type t1: :py:class:`aidge_core.Tensor`
         :param t2: second tensor to test
         :type t2: :py:class:`aidge_core.Tensor`
-        :param relative: relative difference allowed (should be betwen 0 and 1)
+        :param relative: relative difference allowed (should be between 0 and 1)
         :type relative: float
         :param absolute: absolute error allowed (shoulmd be positive)
         :type absolute: float
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 0fa2cfdadb3af350a5668444c0a330e023818a41..c74b538a4e566b3b88e77dd4d097344d52838505 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -81,6 +81,13 @@ Aidge::ImplSpec Aidge::OperatorImpl::getRequiredSpec() const {
     else {
         requiredSpec.attrs.setAttr("type", mOp.type());
     }
+
+    const auto& inhAttrs = mOp.inheritedAttributes();
+    if (inhAttrs) {
+        if (inhAttrs->hasAttr("impl")) {
+            requiredSpec.attrs.setAttr("impl", inhAttrs->getAny("impl"));
+        }
+    }
     return requiredSpec;
 }
 
@@ -88,6 +95,10 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
     Log::debug("getBestMatch() for requirements: {}", requiredSpecs);
 
     const auto availableSpecsSet = getAvailableImplSpecs();
+    AIDGE_ASSERT(availableSpecsSet.size() > 0,
+                 "OperatorImpl::getBestMatch(): No available specs found by "
+                 "getAvailableImplSpecs(). "
+                 "Cannot find best implementation for required specs, aborting.");
     const std::vector<ImplSpec> availableSpecs(availableSpecsSet.begin(), availableSpecsSet.end());
     std::vector<int> matchingSpecs(availableSpecs.size(), -1);
 
@@ -120,15 +131,15 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
             std::string qualifier;
             const auto qualifierPos = std::find_if(attrName.begin(), attrName.end(),
                 [](char c) { return c == ':'; });
-            if (qualifierPos != attrName.begin()) {
+            if (qualifierPos != attrName.end()) {
                 name = attrName.substr(0, qualifierPos - attrName.begin());
-                qualifier = attrName.substr(qualifierPos - attrName.begin());
+                qualifier = attrName.substr(qualifierPos - attrName.begin() + 1);
             }
-
             const bool mandatory = (qualifier == "!");
             if (mandatory) {
                 // Required attribute:
                 if (!spec.attrs.hasAttr(name)) {
+                    Log::debug("Could not find mandatory attribute '{}'.", name);
                     // Missing attribute
                     match = false;
                     break;
@@ -136,6 +147,7 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
                 else if (requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name)
                     || spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName))
                 {
+                    Log::debug("Attribute ({}) value mismatch {} != {}.", name, requiredSpecs.attrs.getAttr<std::string>(attrName), spec.attrs.getAttr<std::string>(name));
                     // Attribute value mismatch
                     match = false;
                     break;
@@ -161,6 +173,10 @@ Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs)
         Log::debug("  {}:{} - {}", (match) ? "MATCH" : "MISMATCH", priority, spec);
     }
 
+    if(matchingSpecs.empty()){
+        Log::debug("  No spec to match registered, returning requiredSpecs.");
+        return requiredSpecs;
+    }
     // Return best match
     const auto bestMatch = std::max_element(matchingSpecs.begin(), matchingSpecs.end());
     if (*bestMatch >= 0) {
@@ -367,6 +383,6 @@ std::shared_ptr<Aidge::ProdConso> Aidge::OperatorImpl::getProdConso() const {
     return std::make_shared<ProdConso>(mOp);
 }
 
-std::set<Aidge::ImplSpec> Aidge::OperatorImpl::getAvailableImplSpecs() const {
-    return std::set<ImplSpec>();
+std::vector<Aidge::ImplSpec> Aidge::OperatorImpl::getAvailableImplSpecs() const {
+    return std::vector<ImplSpec>();
 }
diff --git a/src/backend/cpu/data/TensorImpl.cpp b/src/backend/cpu/data/TensorImpl.cpp
index ed3c96f80c1b8bafd70425451d6618428d1888f0..506287a0c520915e6426f1f0b64d9c562c754d33 100644
--- a/src/backend/cpu/data/TensorImpl.cpp
+++ b/src/backend/cpu/data/TensorImpl.cpp
@@ -47,8 +47,8 @@ void Aidge::TensorImpl_cpu<T>::copyCast(const void *src, const Aidge::DataType s
         return;
     }
 
+    AIDGE_ASSERT(offset + length <= mNbElts, "TensorImpl_cpu<{}>::copyCast(): copy offset ({}) + length ({}) is above capacity ({})", typeid(T).name(), offset, length, mNbElts);
     T* dstT = static_cast<T *>(rawPtr(offset));
-    AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "TensorImpl_cpu<{}>::copyCast(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
     switch (srcDt)
     {
         case DataType::Float64:
diff --git a/src/data/Data.cpp b/src/data/Data.cpp
index 62a883d08a401e02c86408214a061f893ffbfb4a..865b4ff2dcff68753fb5a4cc9cafccdd129e3c8a 100644
--- a/src/data/Data.cpp
+++ b/src/data/Data.cpp
@@ -11,6 +11,50 @@
 
 #include "aidge/data/Data.hpp"
 
+bool Aidge::isDataTypeFloatingPoint(const DataType& type) {
+    switch (type) {
+    case DataType::Float64:
+    case DataType::Float32:
+    case DataType::Float16:
+    case DataType::BFloat16:  return true;
+    default:                  return false;
+    }
+    return false;
+}
+
+size_t Aidge::getDataTypeBitWidth(const DataType& type) {
+    switch (type) {
+    case DataType::Float64:   return 64;
+    case DataType::Float32:   return 32;
+    case DataType::Float16:   return 16;
+    case DataType::BFloat16:  return 16;
+    case DataType::Binary:    return 1;
+    case DataType::Ternary:   return 2;
+    case DataType::Int2:      return 2;
+    case DataType::Int3:      return 3;
+    case DataType::Int4:      return 4;
+    case DataType::Int5:      return 5;
+    case DataType::Int6:      return 6;
+    case DataType::Int7:      return 7;
+    case DataType::Int8:      return 8;
+    case DataType::Int16:     return 16;
+    case DataType::Int32:     return 32;
+    case DataType::Int64:     return 64;
+    case DataType::UInt2:     return 2;
+    case DataType::UInt3:     return 3;
+    case DataType::UInt4:     return 4;
+    case DataType::UInt5:     return 5;
+    case DataType::UInt6:     return 6;
+    case DataType::UInt7:     return 7;
+    case DataType::UInt8:     return 8;
+    case DataType::UInt16:    return 16;
+    case DataType::UInt32:    return 32;
+    case DataType::UInt64:    return 64;
+    default:                  return 0;
+    }
+    return 0;
+}
+
 Aidge::DataFormatTranspose Aidge::getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
     // Permutation array from default format to src format
     const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index abfc91c6cdf9fd4f6eb46100074b22083514d82e..c834167abe15fb8a7ce96053a87a958b7515fe17 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -14,6 +14,7 @@
 #include <cstddef>
 #include <vector>
 
+#include "aidge/data/half.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/Abs.hpp"
@@ -23,19 +24,19 @@
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Sqrt.hpp"
-#include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
+namespace Aidge {
 
-Aidge::Tensor::~Tensor() noexcept = default;
+Tensor::~Tensor() noexcept = default;
 
 
-Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
+Tensor Tensor::operator+(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
     AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
-    auto add_ = Add_Op(2);
+    auto add_ = Add_Op();
     add_.associateInput(0, std::make_shared<Tensor>(*this));
     add_.associateInput(1, std::make_shared<Tensor>(other));
     add_.setDataType(dataType());
@@ -43,11 +44,28 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
     add_.setBackend(mImpl->backend());
     add_.forward();
     // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-    return add_.getOutput(0)->clone();
+    return *add_.getOutput(0);
 }
 
+Tensor& Tensor::operator+=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto add_ = Add_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    add_.associateInput(0, thisPtr);
+    add_.associateInput(1, std::make_shared<Tensor>(other));
+    add_.setOutput(0, thisPtr);
+    add_.setDataType(dataType());
+    add_.setDataFormat(dataFormat());
+    add_.setBackend(mImpl->backend());
+    add_.forward();
+    return *this;
+}
 
-Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
+
+Tensor Tensor::operator-(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -60,11 +78,29 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
     sub_.setBackend(mImpl->backend());
     sub_.forward();
     // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-    return sub_.getOutput(0)->clone();
+    return *sub_.getOutput(0);
 }
 
+Tensor& Tensor::operator-=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto sub_ = Sub_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    sub_.associateInput(0, thisPtr);
+    sub_.associateInput(1, std::make_shared<Tensor>(other));
+    sub_.setOutput(0, thisPtr);
+    sub_.setDataType(dataType());
+    sub_.setDataFormat(dataFormat());
+    sub_.setBackend(mImpl->backend());
+    sub_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return *this;
+}
 
-Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
+
+Tensor Tensor::operator*(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -80,8 +116,26 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
     return mul_.getOutput(0)->clone();
 }
 
+Tensor& Tensor::operator*=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto mul_ = Mul_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    mul_.associateInput(0, thisPtr);
+    mul_.associateInput(1, std::make_shared<Tensor>(other));
+    mul_.setOutput(0, thisPtr);
+    mul_.setDataType(dataType());
+    mul_.setDataFormat(dataFormat());
+    mul_.setBackend(mImpl->backend());
+    mul_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return *this;
+}
+
 
-Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
+Tensor Tensor::operator/(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
     AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
@@ -97,7 +151,25 @@ Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
     return div_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::sqrt() const {
+Tensor& Tensor::operator/=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto div_ = Div_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    div_.associateInput(0, thisPtr);
+    div_.associateInput(1, std::make_shared<Tensor>(other));
+    div_.setOutput(0, thisPtr);
+    div_.setDataType(dataType());
+    div_.setDataFormat(dataFormat());
+    div_.setBackend(mImpl->backend());
+    div_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return *this;
+}
+
+Tensor Tensor::sqrt() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto sqrt_ = Sqrt_Op();
     sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
@@ -108,7 +180,7 @@ Aidge::Tensor Aidge::Tensor::sqrt() const {
     return sqrt_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::abs() const {
+Tensor Tensor::abs() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto abs_ = Abs_Op();
     abs_.associateInput(0, std::make_shared<Tensor>(*this));
@@ -119,7 +191,7 @@ Aidge::Tensor Aidge::Tensor::abs() const {
     return abs_.getOutput(0)->clone();
 }
 
-Aidge::Tensor Aidge::Tensor::mean() const {
+Tensor Tensor::mean() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     // TODO: should be the default behavior of ReduceMean_Op
     // No need to specify the list of all axes!
@@ -134,27 +206,27 @@ Aidge::Tensor Aidge::Tensor::mean() const {
     return mean_.getOutput(0)->clone();
 }
 
-Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
-    if (this == &other) {
-        return *this;
-    }
-    resize(other.dims(), other.strides());
-    setDataType(other.dataType(), false);  // do not convert existing data
-    if (other.hasImpl()) {
-        if (hasImpl()) {
-            copyFrom(other);
-        } else {
-            // Perform a shallow copy only
-            setImpl(other.mImpl, other.mImplOffset);
-        }
-    } else {
-        setImpl(nullptr);
-    }
-    return *this;
-}
-
-
-void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t device, bool copyFrom) {
+// Tensor& Tensor::operator=(const Tensor& other) {
+//     if (this == &other) {
+//         return *this;
+//     }
+//     resize(other.dims(), other.strides());
+//     setDataType(other.dataType(), false);  // do not convert existing data
+//     if (other.hasImpl()) {
+//         if (hasImpl()) {
+//         //     copyFrom(other);
+//         // } else {
+//             // Perform a shallow copy only
+//             setImpl(other.mImpl, other.mImplOffset);
+//         }
+//     } else {
+//         setImpl(nullptr);
+//     }
+//     return *this;
+// }
+
+
+void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
     if (mImpl) {
         if (mImpl->device() != std::make_pair(name, device)) {
             // Backend change: create new impl, copy from old to new and replace
@@ -171,8 +243,8 @@ void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t devic
     }
     }
 
-void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
-                           std::vector<Aidge::DimSize_t> strides) {
+void Tensor::resize(const std::vector<DimSize_t>& dims,
+                           std::vector<DimSize_t> strides) {
     if (dims.empty()) {  // scalar
         mDims = std::vector<DimSize_t>(0);
         mStrides = std::vector<DimSize_t>({1});
@@ -234,11 +306,12 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
     }
 }
 
-std::string Aidge::Tensor::toString() const {
-    AIDGE_ASSERT(
-        mImpl && (undefined() || (dims() == std::vector<DimSize_t>({0})) ||
-                  (mImpl->hostPtr() != nullptr)),
-        "tensor should have a valid host pointer");
+std::string Tensor::toString() const {
+
+    if (!hasImpl() || undefined()) {
+        // Return no value on no implementation or undefined size
+        return std::string("{}");
+    }
 
     // TODO: move lambda elsewhere?
     auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
@@ -272,8 +345,10 @@ std::string Aidge::Tensor::toString() const {
     };
 
     if (dims().empty()) {
+        // The Tensor is defined with rank 0, hence scalar
         return ptrToString(mDataType, mImpl->hostPtr(), 0);
     }
+
     std::string res;
     std::size_t dim = 0;
     std::size_t counter = 0;
@@ -340,7 +415,7 @@ std::string Aidge::Tensor::toString() const {
     return res;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(
+Tensor Tensor::extract(
     const std::vector<std::size_t>& fixedCoord) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
     AIDGE_ASSERT(fixedCoord.size() <= mDims.size(),
@@ -356,7 +431,7 @@ Aidge::Tensor Aidge::Tensor::extract(
     return subTensor;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(
+Tensor Tensor::extract(
     const std::vector<std::size_t>& startCoord,
     const std::vector<std::size_t>& dims) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
@@ -370,7 +445,7 @@ Aidge::Tensor Aidge::Tensor::extract(
     return subTensor;
 }
 
-void Aidge::Tensor::makeContiguous() {
+void Tensor::makeContiguous() {
     if (!mImpl || isContiguous()) {
         return;
     }
@@ -408,7 +483,7 @@ void Aidge::Tensor::makeContiguous() {
     resize(mDims);
 }
 
-void Aidge::Tensor::copyCast(const Tensor& src) {
+void Tensor::copyCast(const Tensor& src) {
     if (&src == this) {
         return;
     }
@@ -429,7 +504,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
                         src.size(), mImplOffset);
 }
 
-void Aidge::Tensor::copyFrom(const Tensor& src) {
+void Tensor::copyFrom(const Tensor& src) {
     if (&src == this) {
         return;
     }
@@ -450,7 +525,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
                         mImplOffset);
 }
 
-void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
+void Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
     std::vector<DimSize_t> newDims;
     for (std::size_t i = 0; i < src.dims().size(); ++i) {
         newDims.push_back(src.dims()[transpose[i]]);
@@ -492,11 +567,11 @@ void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t
     setImpl(newImpl);
 }
 
-void Aidge::Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
+void Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
     copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end()));
 }
 
-void Aidge::Tensor::copyCastFrom(const Tensor& src,
+void Tensor::copyCastFrom(const Tensor& src,
                                  std::shared_ptr<Tensor>& movedSrcPtr) {
     if (&src == this) {
         return;
@@ -529,13 +604,13 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
+Tensor& Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) {
     // Scott Meyers' solution to avoid code duplication
     return const_cast<Tensor&>(
         static_cast<const Tensor&>(*this).refContiguous(fallback));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refContiguous(
+const Tensor& Tensor::refContiguous(
     std::shared_ptr<Tensor>& fallback) const {
     AIDGE_ASSERT(getImpl(),
                  "no backend was set for tensor, cannot refCast() it");
@@ -554,15 +629,15 @@ const Aidge::Tensor& Aidge::Tensor::refContiguous(
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
-                                      const Aidge::DataType& dt) {
+Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                      const DataType& dt) {
     // Scott Meyers' solution to avoid code duplication
     return const_cast<Tensor&>(
         static_cast<const Tensor&>(*this).refCast(fallback, dt));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
-                                            const Aidge::DataType& dt) const {
+const Tensor& Tensor::refCast(std::shared_ptr<Tensor>& fallback,
+                                            const DataType& dt) const {
     AIDGE_ASSERT(getImpl(),
                  "no backend was set for tensor, cannot refCast() it");
 
@@ -595,7 +670,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
                                       const std::string& backend,
                                       DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
@@ -603,7 +678,7 @@ Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
         static_cast<const Tensor&>(*this).refFrom(fallback, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
+const Tensor& Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
                                             const std::string& backend,
                                             DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(),
@@ -636,8 +711,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
-                                  const Aidge::DataType& dt,
+Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                  const DataType& dt,
                                   const std::string& backend,
                                   DeviceIdx_t device) {
     // Scott Meyers' solution to avoid code duplication
@@ -645,8 +720,8 @@ Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
         static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device));
 }
 
-const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
-                                        const Aidge::DataType& dt,
+const Tensor& Tensor::ref(std::shared_ptr<Tensor>& fallback,
+                                        const DataType& dt,
                                         const std::string& backend,
                                         DeviceIdx_t device) const {
     AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it");
@@ -670,9 +745,64 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback,
     }
 }
 
-std::set<std::string> Aidge::Tensor::getAvailableBackends() {
+
+std::vector<std::size_t>
+Tensor::toCoord(const std::vector<DimSize_t>& dimensions, std::size_t index) {
+    std::vector<std::size_t> coord(dimensions.size());
+    std::size_t i = dimensions.size();
+
+    while (i-- > 0) {
+        coord[i] = (index % dimensions[i]);
+        index /= dimensions[i];
+    }
+    return coord;
+}
+
+
+std::size_t Tensor::toIndex(const std::vector<DimSize_t> &dimensions, const std::vector<std::size_t>& coords) {
+    AIDGE_ASSERT(coords.size() == dimensions.size(), "Tensor::toIndex(): Coordinates do not match the number of dimensions.\n\tCoords : {}\n\tDimensions: {}", coords, dimensions);
+    std::size_t index = 0;
+    std::size_t dimensions_s = 1; // stride
+    std::size_t i = dimensions.size();
+    while (i-- > 0) {
+        index += coords[i] * dimensions_s;
+        dimensions_s *= dimensions[i];
+    }
+    return index;
+}
+
+template<typename T>
+bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<T>& coords){
+    AIDGE_ASSERT(coords.size() == dimensions.size(),
+                 "Coordinates({}) to compare do not have "
+                 "the same number of dimensions as the tensor dimensions({}), aborting.",
+                 coords,
+                 dimensions);
+    bool isInBound {true};
+    for(std::size_t i = 0 ; i < coords.size() && isInBound; ++i ){
+        isInBound = coords[i] >= 0 && coords[i] < static_cast<T>(dimensions[i]) ;
+    }
+    return isInBound;
+}
+
+
+bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::size_t index){
+    return index < std::accumulate(dimensions.cbegin(), dimensions.cend(), std::size_t(1), std::multiplies<std::size_t>());
+}
+
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int16_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int32_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::int64_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<std::size_t>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<float>& coords);
+template bool Tensor::isInBounds(const std::vector<DimSize_t>& dimensions, const std::vector<double>& coords);
+
+
+std::set<std::string> Tensor::getAvailableBackends() {
     std::set<std::string> backendsList;
-    for (const auto& tupleKey : Registrar<Tensor>::getKeys())
+    for (const auto& tupleKey : Registrar<Tensor>::getKeys()) {
         backendsList.insert(std::get<0>(tupleKey));
+    }
     return backendsList;
 }
+}  // namespace Aidge
diff --git a/src/data/interpolation.cpp b/src/data/interpolation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ce5431a2b3bd742f98a169666811bd5d373a5c24
--- /dev/null
+++ b/src/data/interpolation.cpp
@@ -0,0 +1,237 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/data/Interpolation.hpp"
+
+#include <algorithm>  // std::clamp
+#include <bitset>
+#include <cmath>      // std::ceil, std::floor
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <stdexcept>  // std::runtime_error
+#include <utility>    // std::make_pair, std::set
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/half.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+template <typename T>
+[[noreturn]] T
+Interpolation::interpolate(const std::vector<float> & /*originalIndex*/,
+                           const std::vector<Point<T>> & /*points*/,
+                           const Mode /*interpMode*/) {
+    AIDGE_THROW_OR_ABORT(
+        std::runtime_error,
+        "interpolate() is backend dependendant and should be"
+        "called from derived classes: Interpolation<Backend>::interpolate(...)"
+        "Meaning that for CPU backend, InterpolationCPU::interpolate() should "
+        "be called.");
+}
+
+std::vector<float> Interpolation::untransformCoordinates(
+    const std::vector<DimSize_t> &transformedCoords,
+    const std::vector<DimSize_t> &inputDims,
+    const std::vector<DimSize_t> &outputDims,
+    const Interpolation::CoordinateTransformation coordTransfoMode) {
+    AIDGE_ASSERT(
+        inputDims.size() == outputDims.size(),
+        "Interpolate::untransformCoordinates: input and output coordinates "
+        "dimension number mismatch, they should be equal."
+        "Got inputDims({}) and outputDims ({}).",
+        inputDims,
+        outputDims);
+    AIDGE_ASSERT(
+        transformedCoords.size() == outputDims.size(),
+        "Interpolate::untransformCoordinates: coordinates dimension mismatch, "
+        "transformed coords number should be equal to output dimension number."
+        "Got coords to transform ({}) and outputDims ({})",
+        transformedCoords,
+        outputDims);
+    std::vector<float> originalCoords(transformedCoords.size());
+
+    for (DimIdx_t i = 0; i < transformedCoords.size(); ++i) {
+        float scale = static_cast<float>(outputDims[i]) /
+                      static_cast<float>(inputDims[i]);
+
+        switch (coordTransfoMode) {
+        case CoordinateTransformation::AlignCorners:
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "Interpolation::untransformCoords: Unsupported Coordinate "
+                "transform : AlignCorners");
+            break;
+        case CoordinateTransformation::Asymmetric:
+            originalCoords[i] = transformedCoords[i] / scale;
+            break;
+        case CoordinateTransformation::HalfPixel:
+            originalCoords[i] = (transformedCoords[i] + 0.5) / scale - 0.5;
+            break;
+        case CoordinateTransformation::HalfPixelSymmetric:
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "Interpolation::untransformCoords: Unsupported Coordinate "
+                "transform : HalfPixelSymmetric");
+            break;
+        case Interpolation::CoordinateTransformation::PytorchHalfPixel:
+
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "Interpolation::untransformCoords: Unsupported Coordinate "
+                "transform : PytorchHalfPixel");
+            break;
+        }
+    }
+    return originalCoords;
+}
+
+/**
+ * @details Generates a list of all neighbours of a given coordinate.
+ * Since the coordinates are floating points as they are the result of
+ * Interpolation::untransformCoords, they are approximations of coordinates in
+ * originalTensor frame from coordinates in interpolatedTensor frame.
+ *
+ * So to retrieve the neighbouring values, we must apply either floor() or
+ * ceil() to each coordinate.
+ *
+ * In order to generate the list of all combinations
+ * available, we simply iterate through the bits of each values from 0 to
+ * tensorDims.
+ * @example: in 2 dimensions, we have the point (1.3, 3.4)
+ * we iterate up to 2^2 - 1 and
+ * 0 = 0b00 -> (ceil(x) , ceil(y))  = (2,4)
+ * 1 = 0b01 -> (floor(x), ceil(y))  = (1,4)
+ * 2 = 0b10 -> (ceil(x) , floor(y)) = (2,3)
+ * 3 = 0b11 -> (floor(x), floor(y)) = (1,3)
+ */
+template <typename T>
+std::set<Interpolation::Point<T>>
+Interpolation::retrieveNeighbours(const T *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode) {
+
+    Log::debug("retrieveNeighbours: TensorDims : {}", tensorDims);
+    Log::debug("retrieveNeighbours: coords to interpolate : {}", coords);
+
+    // Will retrieve out of bound values depending on given padding mode.
+    // auto retrieveOutOfBoundValue =
+    //     [&tensorValues, &tensorDims, &paddingMode](Coords coord) -> T {
+    //     std::vector<DimSize_t> rectifiedCoord;
+    //     rectifiedCoord.reserve(coord.size());
+    //     switch (paddingMode) {
+    //     case Aidge::PadBorderType::Edge: {
+    //         for (DimSize_t i = 0; i < coord.size(); ++i) {
+    //             rectifiedCoord[i] = coord[i] < 0 ? 0 : tensorDims[i] - 1;
+    //         }
+    //         return tensorValues[Tensor::getIdx(tensorDims, rectifiedCoord)];
+    //     }
+    //     case Aidge::PadBorderType::Zero: {
+    //         return static_cast<T>(0);
+    //     }
+    //     default: {
+    //         AIDGE_THROW_OR_ABORT(
+    //             std::runtime_error,
+    //             "Unsupported padding mode as of now for interpolation.");
+    //     }
+    //     }
+    // };
+
+    std::set<Point<T>> neighbours;
+    const std::size_t nbNeighbours = std::size_t(1) << tensorDims.size();
+    Coords neighbourCoords(tensorDims.size());
+
+    for (std::size_t i = 0; i < nbNeighbours; ++i) {
+        const std::bitset<MaxDim> bits = std::bitset<MaxDim>{i};
+        for (size_t j = 0; j < tensorDims.size(); ++j) {
+            neighbourCoords[j] =
+                bits[j] == 0 ? std::ceil(coords[j]) : std::floor(coords[j]);
+        }
+
+        T value;
+        if (Tensor::isInBounds(tensorDims, neighbourCoords)) {
+            // cast from signed to unsigned won't cause problems as we ensured
+            // that all neighbourCoords values are >= 0 with isInBounds
+            value = tensorValues[Tensor::toIndex(
+                tensorDims,
+                std::vector<DimSize_t>(neighbourCoords.begin(),
+                                       neighbourCoords.end()))];
+        } else {
+            switch (paddingMode) {
+                case PadBorderType::Edge:
+                    for (DimSize_t j = 0; j < tensorDims.size(); ++j) {
+                        neighbourCoords[j] = (neighbourCoords[j] < 0) ? 0 :
+                                                ((neighbourCoords[j] >= static_cast<std::int64_t>(tensorDims[j])) ? (tensorDims[j] - 1) :
+                                                    neighbourCoords[j]);
+                    }
+                    value = tensorValues[Tensor::toIndex(
+                                tensorDims,
+                                std::vector<DimSize_t>(neighbourCoords.begin(),
+                                                    neighbourCoords.end()))];
+                    break;
+                case PadBorderType::Zero:
+                    value = static_cast<T>(0);
+                    break;
+                default:
+                    AIDGE_THROW_OR_ABORT(
+                        std::runtime_error,
+                        "Unsupported padding mode as of now for interpolation.");
+            }
+        }
+        neighbours.insert(std::make_pair(neighbourCoords, value));
+    }
+    Log::debug("Interpolation::retrieveNeighbours(): neighbourCoords: {}",
+               neighbours);
+    return neighbours;
+}
+
+template std::set<Interpolation::Point<int16_t>>
+Interpolation::retrieveNeighbours(const int16_t *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<int32_t>>
+Interpolation::retrieveNeighbours(const int32_t *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<int64_t>>
+Interpolation::retrieveNeighbours(const int64_t *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<half_float::half>>
+Interpolation::retrieveNeighbours(const half_float::half *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<float>>
+Interpolation::retrieveNeighbours(const float *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+template std::set<Interpolation::Point<double>>
+Interpolation::retrieveNeighbours(const double *tensorValues,
+                                  const std::vector<DimSize_t> &tensorDims,
+                                  const std::vector<float> &coords,
+                                  const PadBorderType paddingMode);
+
+} // namespace Aidge
diff --git a/src/filler/ConstantFiller.cpp b/src/filler/ConstantFiller.cpp
index 1e992f4a192c2fd629b2c813c902b127f29a2b02..b2118866f92290103d50290085c7675215a4d997 100644
--- a/src/filler/ConstantFiller.cpp
+++ b/src/filler/ConstantFiller.cpp
@@ -39,6 +39,7 @@ void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValu
     tensor->copyCastFrom(tensorWithValues);
 }
 
-
+template void Aidge::constantFiller<std::int32_t>(std::shared_ptr<Aidge::Tensor>, std::int32_t);
+template void Aidge::constantFiller<std::int64_t>(std::shared_ptr<Aidge::Tensor>, std::int64_t);
 template void Aidge::constantFiller<float>(std::shared_ptr<Aidge::Tensor>, float);
 template void Aidge::constantFiller<double>(std::shared_ptr<Aidge::Tensor>, double);
diff --git a/src/filler/UniformFiller.cpp b/src/filler/UniformFiller.cpp
index a942f59d717fd8d7b541ee28868a7fb9f2e7cd95..1951fcc623612bd688048fcc5fb71526032b2a4a 100644
--- a/src/filler/UniformFiller.cpp
+++ b/src/filler/UniformFiller.cpp
@@ -8,8 +8,9 @@
  * SPDX-License-Identifier: EPL-2.0
  *
  ********************************************************************************/
+#include <cstdint>  // std::int32_t
 #include <memory>
-#include <random>  // normal_distribution, uniform_real_distribution
+#include <random>   // normal_distribution, uniform_real_distribution
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/filler/Filler.hpp"
@@ -19,10 +20,16 @@ template <typename T>
 void Aidge::uniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T min, T max) {
     AIDGE_ASSERT(tensor->getImpl(),
                  "Tensor got no implementation, cannot fill it.");
-    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");
+    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type {} and {}",NativeType<T>::type, tensor->dataType());
 
 
-    std::uniform_real_distribution<T> uniformDist(min, max);
+     using DistType = typename std::conditional<
+        std::is_integral<T>::value,
+        std::uniform_int_distribution<T>,
+        std::uniform_real_distribution<T>
+    >::type;
+
+    DistType uniformDist(min, max);
 
     std::shared_ptr<Aidge::Tensor> cpyTensor;
     // Create cpy only if tensor not on CPU
@@ -42,3 +49,7 @@ template void Aidge::uniformFiller<float>(std::shared_ptr<Aidge::Tensor>, float,
                                           float);
 template void Aidge::uniformFiller<double>(std::shared_ptr<Aidge::Tensor>,
                                            double, double);
+template void Aidge::uniformFiller<std::int32_t>(std::shared_ptr<Aidge::Tensor>,
+                                                 std::int32_t, std::int32_t);
+template void Aidge::uniformFiller<std::int64_t>(std::shared_ptr<Aidge::Tensor>,
+                                                 std::int64_t, std::int64_t);
\ No newline at end of file
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index ef322fe5b795b9cb9c62c3593abdd330fd471575..465359757eadd2799aa7f272e2d85b032a60cfdd 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -103,7 +103,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
         std::string givenName =
             (node_ptr->name().empty())
                 ? "<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>"
-                : "\"" + node_ptr->name() + "\\n<sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
+                : "\"" + node_ptr->name() + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
 
         std::string nodeCls = "";
         if (node_ptr->type() == "Producer") {
@@ -144,27 +144,31 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
       }
       IOIndex_t outputIdx = 0;
       for (const auto& childs : node_ptr->getOrderedChildren()) {
-        for (const auto& child : childs) {
+        // Keep only unique children in order to avoid duplicating connections
+        const auto uniqueChilds = std::set<NodePtr>(childs.begin(), childs.end());
+        for (const auto& child : uniqueChilds) {
           if (child != nullptr) {
             IOIndex_t inputIdx = 0;
             for (auto parent : child->inputs()) {
               if (parent.first == node_ptr && parent.second == outputIdx) {
                 // Add-on to display the operator's output dimensions
                 std::string dims = "";
+                std::string dtype = "";
                 const auto op = std::dynamic_pointer_cast<OperatorTensor>(node_ptr->getOperator());
                 if (op && !op->getOutput(outputIdx)->undefined()) {
                   dims += " " + fmt::format("{}", op->getOutput(outputIdx)->dims());
+                  dtype += " " + fmt::format("{}", op->getOutput(outputIdx)->dataType());
                 }
 
                 if (mNodes.find(child) != mNodes.end()) {
-                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                              outputIdx, dims, inputIdx, child->type(), namePtrTable.at(child));
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}{}<br/>&darr;<br/>{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, dtype, inputIdx, child->type(), namePtrTable.at(child));
                 }
                 else if (verbose) {
-                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                              outputIdx, dims, inputIdx, static_cast<void*>(child.get()));
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}{}<br/>&darr;<br/>{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, dtype, inputIdx, static_cast<void*>(child.get()));
                 }
-                break;
+                // Do not break here because the same child can be connected to several inputs
               }
               ++inputIdx;
             }
@@ -178,11 +182,13 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     for (const auto& input : mInputNodes) {
       if (input.first != nullptr) {
         const auto& op_ = std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator());
-        if (op_->getInput(input.second) && (!op_->getInput(input.second)->empty())) {
-            fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&rarr;{}{}\"|{}_{}\n", inputIdx, inputIdx,
-                    input.second, op_->getInput(input.second)->dims(), input.first->type(), namePtrTable.at(input.first));
+        if (op_->getInput(input.second) && (!op_->getInput(input.second)->undefined())) {
+            std::string dims = " " + fmt::format("{}", op_->getInput(input.second)->dims());
+            std::string dtype = " " + fmt::format("{}", op_->getInput(input.second)->dataType());
+            fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"{}{}<br/>&darr;<br/>{}\"|{}_{}\n", inputIdx, inputIdx,
+                    dims, dtype, input.second, input.first->type(), namePtrTable.at(input.first));
         } else {
-            fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&rarr;{}\"|{}_{}\n", inputIdx, inputIdx,
+            fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&darr;<br/>{}\"|{}_{}\n", inputIdx, inputIdx,
                     input.second, input.first->type(), namePtrTable.at(input.first));
         }
       }
@@ -197,14 +203,16 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
       if (output.first != nullptr) {
         // Add-on to display the operator's output dimensions
         std::string dims = "";
+        std::string dtype = "";
         const auto op = std::dynamic_pointer_cast<OperatorTensor>(output.first->getOperator());
         if (op && op->getOutput(output.second) && !op->getOutput(output.second)->undefined()) {
           dims += " " + fmt::format("{}", op->getOutput(output.second)->dims());
+          dtype += " " + fmt::format("{}", op->getOutput(output.second)->dataType());
         }
 
-        fmt::print(fp.get(), "{}_{}--->|\"{}{}&rarr;\"|output{}((out#{})):::outputCls\n",
+        fmt::print(fp.get(), "{}_{}--->|\"{}{}{}<br/>&darr;\"|output{}((out#{})):::outputCls\n",
                     output.first->type(), namePtrTable.at(output.first), output.second,
-                    dims, outputIdx, outputIdx);
+                    dims, dtype, outputIdx, outputIdx);
       }
       else {
         fmt::print(fp.get(), "output{}((out#{})):::outputCls\n", outputIdx, outputIdx);
@@ -270,7 +278,10 @@ void Aidge::GraphView::setRootNode(NodePtr node) {
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const {
     std::set<std::shared_ptr<Aidge::Node>> nodes;
     for (const auto& node : mInputNodes) {
-        nodes.insert(node.first);
+        // Do not include dummy inputs
+        if (node.first) {
+            nodes.insert(node.first);
+        }
     }
     return nodes;
 }
@@ -278,7 +289,10 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const {
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::outputNodes() const {
     std::set<std::shared_ptr<Aidge::Node>> nodes;
     for (const auto& node : mOutputNodes) {
-        nodes.insert(node.first);
+        // Do not include dummy outputs
+        if (node.first) {
+            nodes.insert(node.first);
+        }
     }
     return nodes;
 }
@@ -341,7 +355,7 @@ Aidge::IOIndex_t Aidge::GraphView::getNbDataInputs() const {
   for (const std::shared_ptr<Node> &inNode : inputNodes()) {
     // We cannot simply add inNode->nbDataInputs(), as input nodes may already
     // have some inputs connected within the GraphView, which would therefore not
-    // constitue inputs (from outside) for the GraphView!
+    // constitute inputs (from outside) for the GraphView!
     const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
         inNode->dataInputs();
 
@@ -423,7 +437,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
     // remove current Data connections and use dummy inputs to propagate dimensions
     // setInputs
     // Link every tensor to the right pointer
-    // following parent - children informations
+    // following parent - children information
     if (!dims.empty()){
       Log::debug("forwardDims(): setting graph input dims ({} dims provided).", dims.size());
 
@@ -522,7 +536,10 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 }
 
                 if (parentsForwarded && op->forwardDims(allowDataDependency)) {
-                    // Recompute everytime, even if it was already computed in a
+                    Log::debug("Dimensions forwarded for node {} (of type {})",
+                        nodePtr->name(), nodePtr->type());
+
+                    // Recompute every time, even if it was already computed in a
                     // previous call of forwardDims(), as the graph may have changed!
                     dimsForwarded.insert(nodePtr);
                     for (const auto& child : nodePtr->getChildren()) {
@@ -532,7 +549,9 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                     }
                 }
                 else {
-                    Log::debug("Unable to forward dimensions for node {} (of type {}) yet", nodePtr->name(), nodePtr->type());
+                    if (parentsForwarded) {
+                        Log::debug("Unable to forward dimensions for node {} (of type {})", nodePtr->name(), nodePtr->type());
+                    }
                     nextList.insert(nodePtr);
                 }
             }
@@ -614,7 +633,7 @@ void Aidge::GraphView::setInputId(Aidge::IOIndex_t /*inID*/,
 }
 
 void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam) {
-  AIDGE_ASSERT(node != nullptr, "Trying to add non-existant node!");
+  AIDGE_ASSERT(node != nullptr, "Trying to add non-existent node!");
 
   // first node to be added to the graph is the root node by default
   if (mRootNode == nullptr) {
@@ -806,7 +825,7 @@ bool Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool incl
     mRootNode = *noParentNodes.begin();
 
     if (noParentNodes.size() > 1) {
-      // If there is more than one, order unicity cannot be garanteed!
+      // If there is more than one, order unicity cannot be guaranteed!
       orderUnicity = false;
     }
 
@@ -909,7 +928,7 @@ void Aidge::GraphView::addChild(
   // assert input node is valid
   if (!toNode.first) {
     assert(toOtherView->inputNodes().size() == 1U &&
-           "If no intput node is provided, the other graph should have only "
+           "If no input node is provided, the other graph should have only "
            "one to make the choice explicit.");
     toNode.first = *(toOtherView->inputNodes().begin());
   } else {
@@ -1030,7 +1049,7 @@ void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnab
 
 
 bool Aidge::GraphView::swap(Node & /*node*/, Node & /*otherNode*/) {
-  fmt::print("Swap() not implementated yet. Return false.\n");
+  fmt::print("Swap() not implemented yet. Return false.\n");
   return false;
 }
 
@@ -1221,7 +1240,6 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
         if (removeFromGraphs) {
             for (const auto& g : commonGraphViews) {
                 g -> remove(nodePtr, false);
-                g -> updateInputsOutputsDelete(nodePtr);
             }
             nodePtr -> resetConnections(true);
         }
@@ -1461,7 +1479,7 @@ void Aidge::GraphView::updateInputsOutputsDelete(std::shared_ptr<Node> deletedNo
 std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*cloneNode)(NodePtr)) const {
   std::shared_ptr<GraphView> newGraph = std::make_shared<GraphView>(mName);
 
-  // Map for old node -> new node correspondance
+  // Map for old node -> new node correspondence
   std::map<NodePtr, NodePtr> oldToNewNodes;
 
   for (const std::shared_ptr<Node> &node_ptr : mNodes) {
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index b2ceb903d51dbb880979cd2191825a6310f9e5ff..da6d833f3aa933cd5e707814c279142de5bc4a23 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -18,9 +18,10 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/future_std/any.hpp"
 
-Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
-    : mName(name),
+Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs)
+    : mAttrs(attrs),
       mOperator(op),
       mParents(std::vector<std::shared_ptr<Node>>(static_cast<std::size_t>(op->nbInputs()),
                                                   nullptr)),
@@ -31,11 +32,18 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
       mIdOutParents(
               std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex))
 {
-    // ctor
-    if (op) {
-        mForward.push_back([this](){ this->mOperator->forward(); return true; });
-        mBackward.push_back([this](){ this->mOperator->backward(); return true; });
-    }
+    mForward.push_back([this](){ this->mOperator->forward(); return true; });
+    // mForward.push_back(std::bind(&Operator::forward, mOperator.get()));
+    mBackward.push_back([this](){ this->mOperator->backward(); return true; });
+}
+
+// Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs)
+//     : Node(op, std::make_shared<DynamicAttributes>(attrs)) {}
+
+Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
+    : Node(op, std::make_shared<DynamicAttributes>(std::map<std::string, future_std::any>({std::make_pair("name", future_std::any(name))})))
+{
+    //ctor
 }
 
 ///////////////////////////////////////////////////////
@@ -70,7 +78,7 @@ Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
 
 void Aidge::Node::setName(const std::string& name) {
     for (auto graphView : views()) graphView->updateNodeName(shared_from_this(), name);
-    mName = name;
+    mAttrs->setAttr<std::string>("name", name);
 }
 
 std::string Aidge::Node::createUniqueName(std::string baseName)
@@ -399,18 +407,18 @@ void Aidge::Node::resetConnections(bool includeLearnableParam) {
 ///////////////////////////////////////////////////////
 
 Aidge::NodePtr Aidge::Node::cloneSharedOperators() const {
-    return std::make_shared<Node>(mOperator, mName);
+    return std::make_shared<Node>(mOperator, mAttrs);
 }
 
 Aidge::NodePtr Aidge::Node::cloneSharedProducers() const {
     std::shared_ptr<Operator> op =
             (mOperator->type() == Producer_Op::Type) ? mOperator : mOperator->clone();
 
-    return std::make_shared<Node>(op, mName);
+    return std::make_shared<Node>(op, mAttrs);
 }
 
 Aidge::NodePtr Aidge::Node::clone() const {
-    return std::make_shared<Node>(mOperator->clone(), mName);
+    return std::make_shared<Node>(mOperator->clone(), mAttrs);
 }
 
 std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta, std::set<Aidge::NodePtr> nodeSee) {
diff --git a/src/graph/StaticAnalysis.cpp b/src/graph/StaticAnalysis.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..033e51022842983caacba9385248c9f02c1e5568
--- /dev/null
+++ b/src/graph/StaticAnalysis.cpp
@@ -0,0 +1,171 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/graph/StaticAnalysis.hpp"
+
+Aidge::OperatorStats::OperatorStats(const Operator& op)
+  : mOp(op)
+{
+    //ctor
+}
+
+size_t Aidge::OperatorStats::getNbArithmIntOps() const {
+    const auto opTensor = dynamic_cast<const OperatorTensor*>(&mOp);
+    if (opTensor) {
+        if (!isDataTypeFloatingPoint(opTensor->getOutput(0)->dataType())) {
+            return getNbArithmOps();
+        }
+    }
+    return 0;
+}
+
+Aidge::StaticAnalysis::StaticAnalysis(std::shared_ptr<GraphView> graph)
+  : mGraph(graph)
+{
+    //ctor
+}
+
+void Aidge::StaticAnalysis::summary(bool incProducers) const {
+    fmt::println("--------------------------------------------------------------------------------");
+    fmt::println("                        Layer (type)               Output Shape         Param #");
+    fmt::println("================================================================================");
+
+    size_t nbParams = 0;
+    size_t paramsSize = 0;  // Size in bits
+    size_t fwdBwdSize = 0;  // Size in bits
+
+    const auto namePtrTable = mGraph->getRankedNodesName("{0} ({1}#{3})");
+    for (const auto node : mGraph->getOrderedNodes()) {
+        if (node->type() == Producer_Op::Type && !incProducers) {
+            continue;
+        }
+
+        auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
+        std::string outputDimsStr = fmt::format("{: >27}", "?");
+        if (opTensor) {
+            const auto outputDims = opTensor->getOutput(0)->dims();
+            outputDimsStr = fmt::format("{: >27}", fmt::format("{}", outputDims));
+  
+            for (size_t out = 0; out < node->nbOutputs(); ++out) {
+                const auto output = opTensor->getOutput(out);
+                if (output && node->type() != Producer_Op::Type) {
+                    fwdBwdSize += output->size()
+                        * getDataTypeBitWidth(output->dataType());
+                }
+            }
+        }
+
+        nbParams += getNbParams(node);
+        paramsSize += getParamsSize(node);
+        fmt::println("{: >36}{}{: >16}",
+          namePtrTable.at(node), outputDimsStr, getNbParams(node));
+    }
+
+    size_t inputSize = 0;  // Size in bits
+    for (const auto input : mGraph->getOrderedInputs()) {
+        if (input.first) {
+            auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator());
+            if (opTensor && opTensor->getInput(input.second)) {
+                inputSize += opTensor->getInput(input.second)->size()
+                    * getDataTypeBitWidth(opTensor->getInput(input.second)->dataType());
+            }
+        }
+    }
+
+    fmt::println("================================================================================");
+    fmt::println("Total params: {}", nbParams);
+    fmt::println("--------------------------------------------------------------------------------");
+    fmt::println("Input size (MB): {}", inputSize / 8.0 / 1024 / 1024);
+    fmt::println("Forward/backward pass size (MB): {}", fwdBwdSize / 8.0 / 1024 / 1024);
+    fmt::println("Params size (MB): {}", paramsSize / 8.0 / 1024 / 1024);
+    fmt::println("Estimated Total Size (MB): {}", (inputSize + fwdBwdSize + paramsSize) / 8.0 / 1024 / 1024);
+    fmt::println("--------------------------------------------------------------------------------");
+}
+
+size_t Aidge::StaticAnalysis::getNbParams(std::shared_ptr<Node> node) const {
+    const auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
+
+    size_t nbParams = 0;
+
+    // Look for Producers directly attached to the node's inputs.
+    size_t i = 0;
+    for (auto parent : node->inputs()) {
+        if (parent.first && mGraph->inView(parent.first)) {
+            if (parent.first->type() == Producer_Op::Type && opTensor->getInput(i)) {
+                nbParams += opTensor->getInput(i)->size();
+            }
+        }
+        ++i;
+    }
+
+    // Look for internal Producers, in case of meta-op.
+    if (!node->getOperator()->isAtomic()) {
+        const auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator())->getMicroGraph();
+        for (const auto internalNode : microGraph->getNodes()) {
+            if (internalNode->type() == Producer_Op::Type) {
+                const auto internalOpTensor = std::dynamic_pointer_cast<OperatorTensor>(internalNode->getOperator());
+                nbParams += internalOpTensor->getOutput(0)->size();
+            }
+        }
+    }
+
+    return nbParams;
+}
+
+size_t Aidge::StaticAnalysis::getParamsSize(std::shared_ptr<Node> node) const {
+    const auto opTensor = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator());
+
+    size_t paramsSize = 0;
+
+    // Look for Producers directly attached to the node's inputs.
+    size_t i = 0;
+    for (auto parent : node->inputs()) {
+        if (parent.first && mGraph->inView(parent.first)) {
+            if (parent.first->type() == Producer_Op::Type && opTensor->getInput(i)) {
+                paramsSize += opTensor->getInput(i)->size()
+                    * getDataTypeBitWidth(opTensor->getInput(i)->dataType());
+            }
+        }
+        ++i;
+    }
+
+    // Look for internal Producers, in case of meta-op.
+    if (!node->getOperator()->isAtomic()) {
+        const auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(node->getOperator())->getMicroGraph();
+        for (const auto internalNode : microGraph->getNodes()) {
+            if (internalNode->type() == Producer_Op::Type) {
+                const auto internalOpTensor = std::dynamic_pointer_cast<OperatorTensor>(internalNode->getOperator());
+                paramsSize += internalOpTensor->getOutput(0)->size()
+                    * getDataTypeBitWidth(internalOpTensor->getOutput(0)->dataType());
+            }
+        }
+    }
+
+    return paramsSize;
+}
+
+std::shared_ptr<Aidge::OperatorStats> Aidge::StaticAnalysis::getOpStats(std::shared_ptr<Node> node) const {
+    return (Registrar<OperatorStats>::exists(node->type()))
+        ? Registrar<OperatorStats>::create(node->type())(*(node->getOperator()))
+        : (node->getOperator()->isAtomic())
+            ? std::make_shared<OperatorStats>(*(node->getOperator()))
+            : std::make_shared<MetaOpStats>(*(node->getOperator()));
+}
+
+size_t Aidge::StaticAnalysis::accumulate(size_t (OperatorStats::*func)() const) const {
+    return std::accumulate(
+        mGraph->getNodes().cbegin(),
+        mGraph->getNodes().cend(),
+        std::size_t(0),
+        [this, func](const size_t& lhs, const std::shared_ptr<Node>& rhs) {
+            return lhs + (this->getOpStats(rhs).get()->*func)();
+        });
+}
diff --git a/src/graphRegex/GraphFsmInterpreter.cpp b/src/graphRegex/GraphFsmInterpreter.cpp
index 18b768c6567e64caf6841ed4a339f13fd16f69d6..a2f07129c468c737da022de8a6d1b093cdd08e39 100644
--- a/src/graphRegex/GraphFsmInterpreter.cpp
+++ b/src/graphRegex/GraphFsmInterpreter.cpp
@@ -145,7 +145,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::qomF(std::shared_ptr<FsmGraph> fs
                         edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::REF,mNodesCondition, lexem.str());
                     }else{
                         /*
-                        the sequencial quantify case 
+                        the sequential quantify case 
                         no reference to common 
                         */
                         edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::EMPTY,mNodesCondition,"");
@@ -165,7 +165,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::qomF(std::shared_ptr<FsmGraph> fs
 
 std::shared_ptr<FsmGraph> GraphFsmInterpreter::qzmF(std::shared_ptr<FsmGraph> fsm){
         /*
-        qomf and a bypass empty start to valide 
+        qomf and a bypass empty start to valid 
         */
     fsm = qomF(fsm);
 
diff --git a/src/graphRegex/GraphLexer.cpp b/src/graphRegex/GraphLexer.cpp
index f504ad025940c88058ce5949259c464ae2cedfb6..05a23d02cdbfe072337ea2cc6ed92410e914257b 100644
--- a/src/graphRegex/GraphLexer.cpp
+++ b/src/graphRegex/GraphLexer.cpp
@@ -79,7 +79,7 @@ std::shared_ptr<ParsingToken<gRegexTokenTypes>> GraphLexer::getNextToken(void){
 
                 if(!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,cKeyRegex))
                 {
-                    currentChars.pop_back(); //the last char is the problemes
+                    currentChars.pop_back(); //the last char is the problem
                     break;
                 }
                 else if (std::regex_match(currentChars,cKeyRegex)){
@@ -89,7 +89,7 @@ std::shared_ptr<ParsingToken<gRegexTokenTypes>> GraphLexer::getNextToken(void){
                 if (mPosition < mRegularExpressions.length()) currentChars += mRegularExpressions[mPosition];
                 
             }
-            //we end the match 2 posibility 
+            //we end the match: 2 possibilities
             //we are at the end of the mConditionalExpressions and we need to ensure the match
             //we are not we can continu
             if (mPosition == mRegularExpressions.length()-1)
diff --git a/src/graphRegex/matchFsm/FsmEdge.cpp b/src/graphRegex/matchFsm/FsmEdge.cpp
index 638aad3bc3f5c94d5b20420ed8cc0799daa08cc0..170d5e69366d25ba19ea2f514cab6cd59b545ec0 100644
--- a/src/graphRegex/matchFsm/FsmEdge.cpp
+++ b/src/graphRegex/matchFsm/FsmEdge.cpp
@@ -141,7 +141,7 @@ FsmEdge::FsmEdge(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest
 {
     mNodeSource = source;
     mNodeDest   = dest;
-    // wen i make the edge I init the nodes
+    // when I make the edge, I init the nodes
     // mNodeSource->addEdge(shared_from_this());
     // mNodeDest->addParent(mNodeSource);
 }
diff --git a/src/graphRegex/matchFsm/FsmGraph.cpp b/src/graphRegex/matchFsm/FsmGraph.cpp
index a56474e042cc44a68938b1d19e19a0c6841cb8cb..2ba11a4d26b933bdbfad5c91d127bfa2682473d4 100644
--- a/src/graphRegex/matchFsm/FsmGraph.cpp
+++ b/src/graphRegex/matchFsm/FsmGraph.cpp
@@ -33,7 +33,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){
         for(auto fsmContext : walks){
             allContextSee.push_back(fsmContext);
             //if we are in a valid st we save it
-            //it's one solution of the posible solution of the matching
+            //it's one solution among the possible solutions of the matching
             if(fsmContext->isOnValidState()){
                 //not save 2 time the same end point
                 if(!std::any_of(allValidContext.begin(), allValidContext.end(),
@@ -45,7 +45,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){
 
             }
 
-            //dont test 2 time a fsmContext
+            //don't test the same fsmContext twice
             std::vector<std::shared_ptr<FsmRunTimeContext>> tmpNextWalks = fsmContext->getActState()->test(fsmContext);
             for(auto PotentialFsmContext : tmpNextWalks){
 
@@ -135,15 +135,15 @@ void FsmGraph::mergeOneStartOneValid(const std::shared_ptr<FsmGraph> fsmGraph){
     if (startNodes.size() != 1 || validNodes.size() != 1){
 
         std::ostringstream errorMessage;
-        errorMessage <<"mergeOneStartOneValid  start size: " << startNodes.size() << " valide size : " << validNodes.size()
-        <<" can only merge FSM 1 start 1 valide";
+        errorMessage <<"mergeOneStartOneValid  start size: " << startNodes.size() << " valid size : " << validNodes.size()
+        <<" can only merge FSM 1 start 1 valid";
         throw std::runtime_error(errorMessage.str());
     }
 
     unionG(fsmGraph);
-    //for loop useless but for future merge it's coudl be used
+    //for loop currently unnecessary, but it could be used for future merges
     for(auto valid : validNodes){
-        valid->unValid();
+        valid->invalid();
         for(auto start : startNodes){
             start->unStart();
             _mergeNode(start,valid);
@@ -179,7 +179,7 @@ void FsmGraph::_mergeNode(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNod
     }
     nodes.clear();
 
-    //probagate source attribut
+    //propagate source attribute
     if(source->isValid()){
         dest->valid();
     }
diff --git a/src/graphRegex/matchFsm/FsmNode.cpp b/src/graphRegex/matchFsm/FsmNode.cpp
index 7bc4cf105b43a540bd0e9c686af35dd220611a09..6666d1a72a298f20bdae0eb1c51805e5ae133ba4 100644
--- a/src/graphRegex/matchFsm/FsmNode.cpp
+++ b/src/graphRegex/matchFsm/FsmNode.cpp
@@ -103,7 +103,7 @@ bool FsmNode::isValid(void){
 bool FsmNode::isStart(void){
     return mIsAStart;
 }
-void FsmNode::unValid(void){
+void FsmNode::invalid(void){
     mIsAValid =false;
 }
 void FsmNode::valid(void){
diff --git a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp b/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
index 7a09908e5629e299b6b264fbfaac97bdaf7fa316..89e7faf205ef515049de415a5f057db8a13105e9 100644
--- a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
+++ b/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
@@ -74,7 +74,7 @@ bool FsmRunTimeContext::isAlreadyValid(NodePtr node){
 bool FsmRunTimeContext::areCompatible(std::shared_ptr<FsmRunTimeContext> fsmContext){
     /*
     see if 2 context can be merge
-    it need to have different  mValidNodes exept for common
+    it need to have different  mValidNodes except for common
     and the same idx for the common
     */
 
@@ -192,9 +192,9 @@ std::set<NodePtr> FsmRunTimeContext::getValidNodes(void){
 
 std::set<NodePtr> FsmRunTimeContext::getValidNodesNoCommon(void){
     std::set<NodePtr> differenceSet;
-    std::set<NodePtr> valide = getValidNodes();
+    std::set<NodePtr> valid = getValidNodes();
     std::set<NodePtr> common = getCommonNodes();
-    std::set_difference(valide.begin(), valide.end(), common.begin(), common.end(),std::inserter(differenceSet, differenceSet.end()));
+    std::set_difference(valid.begin(), valid.end(), common.begin(), common.end(),std::inserter(differenceSet, differenceSet.end()));
     return differenceSet;
 }
 
diff --git a/src/nodeTester/ConditionalInterpreter.cpp b/src/nodeTester/ConditionalInterpreter.cpp
index f40e62305334f740057f88ef21cdab749d64bd99..5d10762d93d2bc0e92bf9d15bb24255bb7e51768 100644
--- a/src/nodeTester/ConditionalInterpreter.cpp
+++ b/src/nodeTester/ConditionalInterpreter.cpp
@@ -8,11 +8,11 @@ using namespace Aidge;
 //ConditionalRegisterFunction
 ///////////////////////////////
 
-     std::shared_ptr<ConditionalData> ConditionalRegisterFunction::run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & datas){
+     std::shared_ptr<ConditionalData> ConditionalRegisterFunction::run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & data){
 
         auto lambdaIt = mWlambda.find(key);
         if (lambdaIt != mWlambda.end()) {
-            return lambdaIt->second(datas);
+            return lambdaIt->second(data);
         }else {
             throw std::runtime_error("can not run Lambda due to invalid key: " + key);
         }
@@ -174,7 +174,7 @@ using namespace Aidge;
                         case ConditionalTokenTypes::RPAREN:
                         case ConditionalTokenTypes::STOP:
                         default:
-                            throw std::runtime_error("NODE TYPE NOT SUPORTED IN ConditionalInterpreter");
+                            throw std::runtime_error("NODE TYPE NOT SUPPORTED IN ConditionalInterpreter");
                     }
                 }catch(const std::exception& e){
                     std::ostringstream errorMessage;
@@ -188,7 +188,7 @@ using namespace Aidge;
 
 
     //////////////////////
-    //value convertor
+    //value converter
     /////////////////////
 
 
diff --git a/src/nodeTester/ConditionalLexer.cpp b/src/nodeTester/ConditionalLexer.cpp
index e70772fc1a5d6136fb56f5981d73bf6cb0622991..9cc480ab29e84a775d9e275fe6ba51dc11e6ea14 100644
--- a/src/nodeTester/ConditionalLexer.cpp
+++ b/src/nodeTester/ConditionalLexer.cpp
@@ -28,7 +28,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
             mPosition++;
             continue;
         }
-        //performe tokenisation, find a regex and make a new token
+        //perform tokenisation, find a regex and make a new token
         
         if (std::regex_match(currentChars,std::regex("\\&\\&")))// the AND TOKEN 
         {
@@ -86,7 +86,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
         //non const lent token
         /////
 
-        //LAMBDA, KEY , bool //the fuction TAG 
+        //LAMBDA, KEY , bool //the function TAG 
         else if (std::regex_match(currentChars,std::regex("[A-Za-z_]")))// the KEY TOKEN (a char next )
         {   
             //read all the key 
@@ -97,7 +97,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
             while ( mPosition < mConditionalExpressions.length()) {
                 if(!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,LambdaRegex))
                 {
-                    currentChars.pop_back(); //the last char is the problemes
+                    currentChars.pop_back(); //the last char is the problem
                     break;
                 }
                 else if (std::regex_match(currentChars,LambdaRegex)){
@@ -107,7 +107,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 if (mPosition < mConditionalExpressions.length()) currentChars += mConditionalExpressions[mPosition];
                 //currentChars += mConditionalExpressions[mPosition];
             }
-            //we end the match 2 posibility 
+            //we end the match: 2 possibilities
             //we are at the end of the mConditionalExpressions and we need to ensure the match
             //we are not we can continu
             if (mPosition == mConditionalExpressions.length()-1)
@@ -116,7 +116,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 {
                     throw badTokenError(currentChars,mPosition);
                 }
-                //mPosition++; // we stop all by going pos > lengt
+                //mPosition++; // we stop all by going pos > length
             }
 
 
@@ -153,7 +153,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 if (mPosition < mConditionalExpressions.length()) currentChars += mConditionalExpressions[mPosition];
                 //currentChars += mConditionalExpressions[mPosition];
             }
-            //we end the match 2 posibility 
+            //we end the match: 2 possibilities
             //we are at the end of the mConditionalExpressions and we need to ensure the match
             //we are not we can continu
             if (mPosition == mConditionalExpressions.length()-1)
@@ -189,7 +189,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 if (!std::regex_match(currentChars,strRegex)){
                      throw badTokenError(currentChars,mPosition);
                 }
-                //mPosition++; // we stop all by going pos > lengt
+                //mPosition++; // we stop all by going pos > length
             }
 
             mPosition++; // go after the last " 
diff --git a/src/nodeTester/ConditionalParser.cpp b/src/nodeTester/ConditionalParser.cpp
index ba40c561375e0c09eb86009d447a782ab99d5d0b..5cf6f8617612b09c1a61e694a56dc6ed4d0f2b39 100644
--- a/src/nodeTester/ConditionalParser.cpp
+++ b/src/nodeTester/ConditionalParser.cpp
@@ -76,7 +76,7 @@ std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::Conditional
         return constructAstLambda();
     }
 
-   throw std::runtime_error("ConditionalParser unknow val type "+ token->rep().str() + "\n" + mLexer.rep());
+   throw std::runtime_error("ConditionalParser unknown val type "+ token->rep().str() + "\n" + mLexer.rep());
 
 }
 
@@ -169,7 +169,7 @@ std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::Conditional
 
         std::shared_ptr<AstNode<ConditionalTokenTypes>> right = constructAstExpr(prec);
 
-        //i'm not sur what append to newNode
+        //I'm not sure what happens to newNode
         //std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,constructAstCmpr()});
         std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,right});
         left = newNode;
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 033c476c8a9e865fdf9d5670e295c3e4fb6101b3..f6fd0cd9fc647e29402d36f1f6838642e099ae6c 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -22,12 +22,10 @@
 
 const std::string Aidge::Add_Op::Type = "Add";
 
-Aidge::Add_Op::Add_Op(const IOIndex_t nbIn)
-    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
+Aidge::Add_Op::Add_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1)
 {
-    if (nbIn == 0) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-    }
+    // ctor
 }
 
 Aidge::Add_Op::Add_Op(const Add_Op& op)
@@ -89,6 +87,8 @@ std::set<std::string> Aidge::Add_Op::getAvailableBackends() const {
     return Registrar<Add_Op>::getKeys();
 }
 
-std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) {
-    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Add(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Add_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Atan.cpp b/src/operator/Atan.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c0a494ee66fb11bcccac21141da30df5546f0b3c
--- /dev/null
+++ b/src/operator/Atan.cpp
@@ -0,0 +1,53 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Atan.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Atan_Op::Type = "Atan";
+
+Aidge::Atan_Op::Atan_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Atan_Op::Atan_Op(const Aidge::Atan_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Atan_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Atan_Op::clone() const {
+    return std::make_shared<Atan_Op>(*this);
+}
+
+
+void Aidge::Atan_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Atan_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Atan_Op::getAvailableBackends() const {
+    return Registrar<Atan_Op>::getKeys();
+}
+
+///////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Atan(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Atan_Op>(), name);
+}
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index f8c8e5e3f32fff8306184dfdf3baa87392479ebf..78266e3fb391d6f33da9e65b2125dd57885ac89e 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -24,7 +24,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
+const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling" + std::to_string(DIM) + "D";
 
 
 template <Aidge::DimIdx_t DIM>
@@ -134,4 +134,4 @@ std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t
 }
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&);
 template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&);
-template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&);
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&);
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index bcf3b29c45abe2c40788fd1ec0bad87db8ee227b..24a49e56c755ef46404881511cfd6af89628e251 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -24,7 +24,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm";
+const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
@@ -108,9 +108,10 @@ template <Aidge::DimSize_t DIM>
 inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFeatures,
                                        const float epsilon,
                                        const float momentum,
+                                       const bool trainingMode,
                                        const std::string& name) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
-    auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
+    auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum, trainingMode), name);
     addProducer(batchNorm, 1, {nbFeatures}, "scale");
     addProducer(batchNorm, 2, {nbFeatures}, "shift");
     addProducer(batchNorm, 3, {nbFeatures}, "batch_mean");
@@ -118,6 +119,6 @@ inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFe
     return batchNorm;
 }
 
-template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
-template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
-template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const bool, const std::string&);
+template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const bool, const std::string&);
+template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const bool, const std::string&);
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..10b864b54594c86ed1486611fdd91fd916f2291b
--- /dev/null
+++ b/src/operator/Clip.cpp
@@ -0,0 +1,93 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Clip.hpp"
+
+#include <memory>
+#include <string>
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/operator/Clip.hpp"
+
+const std::string Aidge::Clip_Op::Type = "Clip";
+
+bool Aidge::Clip_Op::dimsForwarded() const {
+    if ((getInput(1) && !getInput(1)->undefined())
+        || (getInput(2) && !getInput(2)->undefined()))
+    {
+        // output dims are data dependent
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+
+bool Aidge::Clip_Op::forwardDims(bool allowDataDependency) 
+{
+    if (getInput(1) ) 
+    { 
+        if( this->min() != std::numeric_limits<float>::lowest())
+        {
+            Log::notice("{} : ignoring non-empty min attribute because input#1 "
+                  "take precedence",
+                  type());
+        }
+        if (!allowDataDependency) {
+        Log::warn("{} : unable to forwardDims() because output dims are data "
+                    "dependent on input#1",
+                    type());
+        return false;
+        }
+        std::shared_ptr<Tensor> fallback;
+        const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType<float>::type, "cpu");
+        this->min() = *(static_cast<float*>(minV.getImpl()->hostPtr()));
+    }
+    if (getInput(2)) 
+    { 
+       if( this->max() != std::numeric_limits<float>::max())
+        {
+            Log::notice("{} : ignoring non-empty max attribute because input#2 "
+                  "take precedence",
+                  type());
+        }
+        if (!allowDataDependency) {
+        Log::warn("{} : unable to forwardDims() because output dims are data "
+                    "dependent on input#2",
+                    type());
+        return false;
+        }
+        std::shared_ptr<Tensor> fallback;
+        const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType<float>::type, "cpu");
+        this->max() = *(static_cast<float*>(maxV.getImpl()->hostPtr()));
+    }
+    if (!inputsAssociated(false)) {
+        return false;
+    }
+    else if ((getInput(1) && !getInput(1)->empty())  || (getInput(2) && !getInput(2)->empty()))
+    {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,"Expected Input#1 and Input#2 to be scalar (Tensors of empty shapes)");
+    }
+    mOutputs[0] -> resize(getInput(0)->dims());
+    return true;
+}
+void Aidge::Clip_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Clip_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
+std::set<std::string> Aidge::Clip_Op::getAvailableBackends() const {
+    return Registrar<Clip_Op>::getKeys();
+}
+std::shared_ptr<Aidge::Node> Aidge::Clip(const std::string &name,float min,float max)
+{
+    return std::make_shared<Node>(std::make_shared<Clip_Op>(min, max), name);
+}
\ No newline at end of file
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 55efdd51d56f7db4f64880b967def661e5354af5..27b9d1cf151c1d12aa4395a3b24673a2f2a4ad3c 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -49,7 +49,9 @@ std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const {
 
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
-    const DimSize_t axis = op.axis();
+    auto axis = op.axis();
+    const auto nbDimsInput0 = op.getInput(0)->nbDims();
+    axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
 
     assert(op.getInput(0) && "missing input in Concat operator");
     for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index e055c7e5ebb9a6cff9f774da444cc582ed7de34c..836c47645c20ff23539b836af8593cddfbb48498 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -24,7 +24,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
+const std::string Aidge::Conv_Op<DIM>::Type = "Conv" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index f4d524356bd207a7ed101c2887c2fcda53f3bb83..d2a1c9e3d08d8e2c0400d436c6123aeb5f7ce66b 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -25,7 +25,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
+const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 99ccb7505cd959178e4bd7132e32552ea5a72ecf..1e1db2f94948dfd1dd4c6219419b7989eeac8b3a 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -24,7 +24,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Fold_Op<DIM>::Type = "Fold";
+const std::string Aidge::Fold_Op<DIM>::Type = "Fold" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
@@ -102,4 +102,4 @@ std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM>
     return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
 }
 
-template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&);
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index e8c66085de5bc7c808b7f2307a9a82b22a426bb2..c5bca92406e518df593fcc6c3a40525a4ba81dfa 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -43,6 +43,7 @@ Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
 
 Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& op)
     : OperatorTensor(op),
+        mForwardDims(op.mForwardDims),
         mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
 {
     mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
@@ -85,7 +86,15 @@ bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
 }
 
 void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t device) {
-    Log::warn("GenericOperator::setBackend(): cannot set backend for a generic operator, as no implementation has been provided!");
+    if (Registrar<GenericOperator_Op>::exists({name, type()})) {
+        // A custom implementation exists for this meta operator
+        mImpl = Registrar<GenericOperator_Op>::create({name, type()})(*this);
+    }else{
+        Log::warn("GenericOperator::setBackend(): cannot set backend for a generic operator, as no implementation has been provided!");
+    }
+
+
+
 
     for (std::size_t i = 0; i < nbOutputs(); ++i) {
         mOutputs[i]->setBackend(name, device);
@@ -107,4 +116,4 @@ std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
                                                 Aidge::IOIndex_t nbOut,
                                                 const std::string& name) {
     return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
-}
\ No newline at end of file
+}
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index bbcfd0d28ca039318647d206af876727793e1bfc..57886ec2faec86bc5d3a515ed685fdcfd0e15e4e 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -44,8 +44,10 @@ bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
         // Global average pooling takes each filter, averages its values and uses
         // it as an output(Much like a fancier flatten). 1st dim is batch 2nd is
         // number of filter
-        mOutputs[0]->resize({getInput(0)->dims().at(0),
-                             getInput(0)->dims().at(1)});
+        std::vector<DimSize_t> outputDims(getInput(0)->nbDims(), 1);
+        outputDims[0] = getInput(0)->dims()[0];
+        outputDims[1] = getInput(0)->dims()[1];
+        mOutputs[0]->resize(outputDims);
         return true;
     }
 
diff --git a/src/operator/Heaviside.cpp b/src/operator/Heaviside.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9ecb3b436d8312ef479d6bc0592cfe372235fa25
--- /dev/null
+++ b/src/operator/Heaviside.cpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>   // std::size_t
+#include <memory>
+#include <stdexcept> // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Heaviside.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+// ----------------------------------------------------------- Heaviside_Op
+// class
+
+const std::string Heaviside_Op::Type = "Heaviside";
+
+Heaviside_Op::Heaviside_Op(float value)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+      mAttributes(
+          std::make_shared<Attributes_>(attr<HeavisideAttr::Value>(value))) {}
+
+Heaviside_Op::Heaviside_Op(const Heaviside_Op &op)
+    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Heaviside_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Heaviside_Op::clone() const {
+    return std::make_shared<Heaviside_Op>(*this);
+}
+
+void Heaviside_Op::setBackend(const std::string &name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(Heaviside_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Heaviside_Op::getAvailableBackends() const {
+    return Registrar<Heaviside_Op>::getKeys();
+}
+
+// --------------------------------------------------------------- Free
+// functions
+
+NodePtr Heaviside(float value, const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Heaviside_Op>(value), name);
+}
+
+} // namespace Aidge
diff --git a/src/operator/LRN.cpp b/src/operator/LRN.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c5ce243bd6a48ae4b1ce8461924b498c804b53e6
--- /dev/null
+++ b/src/operator/LRN.cpp
@@ -0,0 +1,60 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/LRN.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::LRN_Op::Type = "LRN";
+
+Aidge::LRN_Op::LRN_Op(std::int32_t size)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<LRNAttr::Alpha>(0.0001),
+        attr<LRNAttr::Beta>(0.75),
+        attr<LRNAttr::Bias>(1.0),
+        attr<LRNAttr::Size>(size)))
+{}
+
+Aidge::LRN_Op::LRN_Op(const Aidge::LRN_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(LRN_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::LRN_Op::clone() const {
+    return std::make_shared<LRN_Op>(*this);
+}
+
+void Aidge::LRN_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<LRN_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::LRN_Op::getAvailableBackends() const {
+    return Registrar<LRN_Op>::getKeys();
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::LRN(std::int32_t size, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<LRN_Op>(size), name);
+}
\ No newline at end of file
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 668ffd04b7acb0e72b4a3313805fa89ca3466f32..8fd2aa068c91dfebd6d1a3a47900c3aa9b0f9585 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -71,7 +71,7 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
 
             std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
             for (std::size_t i = 0; i < dims_size-2; ++i) {
-                AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
+                AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad dimension {}: {} != {} for input #0 {} and #1 {}.", i, dims0[i], dims1[i], dims0, dims1);
                 outDims[i] = std::max(dims0[i], dims1[i]);
             }
 
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index 5ce137fe6b6c0e4b7150bfc0f1182f6f8ee94850..535b53749caeffca34eb0bf541f06dee30a3a333 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -20,7 +20,7 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling";
+const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 61239071a99a9dfca8613ef78eba17757c4276b7..cd4a4808137a786b04d8d143e50b96b9648e7d9a 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -150,7 +150,7 @@ void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
 }
 
 void Aidge::Memorize_Op::forward() {
-    Operator::forward();
+    OperatorTensor::forward();
     ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
     mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
 }
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index e3acba9b4cccdf525d80f85344ba500cc7ac885f..cd307c9d15043d3ee5f5de48695e04e4ad2ada6b 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -20,17 +20,22 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
 
-Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph)
-    : OperatorTensor(type, [graph]() {
+Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph, const std::vector<InputCategory>& forcedInputsCategory)
+    : OperatorTensor(type, [graph, forcedInputsCategory]() {
+        IOIndex_t inputIdx = 0;
         std::vector<InputCategory> inputsCategory;
         for (const auto& in : graph->getOrderedInputs()) {
-            if (in.first) {
+            if (inputIdx < forcedInputsCategory.size()) {
+                inputsCategory.push_back(forcedInputsCategory[inputIdx]);
+            }
+            else if (in.first) {
                 inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
             }
             else {
                 // Dummy input, default to OptionalData
                 inputsCategory.push_back(InputCategory::OptionalData);
             }
+            ++inputIdx;
         }
         return inputsCategory;
     }(), graph->getOrderedOutputs().size()),
@@ -43,10 +48,14 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
             mOutputs[outputIdx] = std::dynamic_pointer_cast<Tensor>(outputOp.first->getOperator()->getRawOutput(outputOp.second));
         }
     }
+
+    for (const auto& node : mGraph->getRankedNodesName("{1}_{3}")) {
+        mAttributes->addAttr(node.second, node.first->getOperator()->attributes());
+    }
 }
 
 std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const {
-    return std::make_shared<MetaOperator_Op>(*this);
+    return std::make_shared<MetaOperator_Op>(type(), mGraph->clone());
 }
 
 void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
@@ -54,6 +63,7 @@ void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std:
     AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
 
     const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    AIDGE_ASSERT(inputOp.first, "associateInput(): inputIdx ({}) is a dummy input for this MetaOperator, cannot associate data!", inputIdx);
     inputOp.first->getOperator()->associateInput(inputOp.second, data);
 
     // Associate inputs for custom implementation
@@ -64,6 +74,7 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
 
     const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
+    AIDGE_ASSERT(inputOp.first, "setInput(): inputIdx ({}) is a dummy input for this MetaOperator, cannot associate data!", inputIdx);
     inputOp.first->getOperator()->setInput(inputOp.second, data);
 
     // Associate inputs for custom implementation
@@ -80,12 +91,26 @@ void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceId
     if (Registrar<MetaOperator_Op>::exists({name, type()})) {
         // A custom implementation exists for this meta operator
         mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
+
+        // Set backend for in/out tensor of the MetaOp
+        for(auto i: mGraph->inputNodes()){
+            auto op_i = std::static_pointer_cast<OperatorTensor>(i->getOperator());
+            for(std::size_t in_idx=0; in_idx < op_i->nbInputs(); ++in_idx){
+                op_i->getInput(in_idx)->setBackend(name, device);
+            }
+        }
+        for(auto o: mGraph->outputNodes()){
+            auto op_o = std::static_pointer_cast<OperatorTensor>(o->getOperator());
+            for(std::size_t out_idx=0; out_idx < op_o->nbOutputs(); ++out_idx){
+                op_o->getOutput(out_idx)->setBackend(name, device);
+            }
+        }
+    }else{
+        // Input/output tensors backend are also updated here.
+        mGraph->setBackend(name, device);
     }
 
-    // The micro-graph should always be set to the right backend, since it
-    // shares input/output tensors.
-    // Input/output tensors backend are updated here.
-    mGraph->setBackend(name, device);
+
 }
 
 std::set<std::string> Aidge::MetaOperator_Op::getAvailableBackends() const {
@@ -98,22 +123,6 @@ std::set<std::string> Aidge::MetaOperator_Op::getAvailableBackends() const {
     return backendsList;
 }
 
-std::shared_ptr<Aidge::Attributes> Aidge::MetaOperator_Op::attributes() const {
-    auto attrs = std::make_shared<DynamicAttributes>();
-
-    for (const auto& node : mGraph->getRankedNodesName("{3}")) {
-        const auto attributes = node.first->getOperator()->attributes();
-        if (attributes) {
-            const auto nodeAttrs = DynamicAttributes(attributes->getAttrs());
-            attrs->addAttr(node.first->type() + "#" + node.second, nodeAttrs);
-            if (node.second == "0") {
-                attrs->addAttr(node.first->type(), nodeAttrs);
-            }
-        }
-    }
-
-    return attrs;
-}
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
@@ -243,10 +252,11 @@ void Aidge::MetaOperator_Op::forward() {
 
 std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
                                   const std::shared_ptr<Aidge::GraphView>& graph,
+                                  const std::vector<InputCategory>& forcedInputsCategory,
                                   const std::string& name)
 {
-    auto op = std::make_shared<MetaOperator_Op>(type, graph);
+    auto op = std::make_shared<MetaOperator_Op>(type, graph, forcedInputsCategory);
     auto node = std::make_shared<Node>(op, name);
     op->setUpperNode(node);
     return node;
-}
\ No newline at end of file
+}
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index 910e7c67aad0068679ca2d240b23312add3e42d7..2ed548805010a6cc87950c4d1f7b89edbea4f75c 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -35,14 +35,14 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     auto input = Identity((!name.empty()) ? name + "_input" : "");
     auto hiddenState = Memorize(seqLength, (!name.empty()) ? name + "_hidden_state" : "");
     auto cellState = Memorize(seqLength, (!name.empty()) ? name + "_cell_state" : "");
-    auto add = Add(2, (!name.empty()) ? name + "_add" : "");
+    auto add = Add((!name.empty()) ? name + "_add" : "");
 
     // Forget gate
     auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateX" : "");
     input->addChild(forgetGateX, 0, 0);
     auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateH" : "");
     hiddenState->addChild(forgetGateH, 1, 0);
-    auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
+    auto forgetGate = Add((!name.empty()) ? name + "_forgetGate" : "");
     forgetGateX->addChild(forgetGate, 0, 0);
     forgetGateH->addChild(forgetGate, 0, 1);
     auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : "");
@@ -57,7 +57,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     input->addChild(inputGateX, 0, 0);
     auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateH" : "");
     hiddenState->addChild(inputGateH, 1, 0);
-    auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
+    auto inputGate = Add((!name.empty()) ? name + "_inputGate" : "");
     inputGateX->addChild(inputGate, 0, 0);
     inputGateH->addChild(inputGate, 0, 1);
     auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : "");
@@ -71,7 +71,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     input->addChild(cellCandidateX, 0, 0);
     auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateH" : "");
     hiddenState->addChild(cellCandidateH, 1, 0);
-    auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
+    auto cellCandidate = Add((!name.empty()) ? name + "_cellCandidate" : "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
     cellCandidateH->addChild(cellCandidate, 0, 1);
     auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : "");
@@ -83,7 +83,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     input->addChild(outputGateX, 0, 0);
     auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateH" : "");
     hiddenState->addChild(outputGateH, 1, 0);
-    auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
+    auto outputGate = Add((!name.empty()) ? name + "_outputGate" : "");
     outputGateX->addChild(outputGate, 0, 0);
     outputGateH->addChild(outputGate, 0, 1);
     auto outputGateAct = Sigmoid((!name.empty()) ? name + "_outputGateAct" : "");
@@ -115,7 +115,7 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
         {hiddenState, 1}, {cellState, 1}});
     microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
 
-    auto metaOp = MetaOperator("LSTM", microGraph, name);
+    auto metaOp = MetaOperator("LSTM", microGraph, {}, name);
     addProducer(metaOp, 1, {hiddenChannel, inChannel}, "wi");
     addProducer(metaOp, 2, {hiddenChannel, inChannel}, "wo");
     addProducer(metaOp, 3, {hiddenChannel, inChannel}, "wf");
@@ -143,14 +143,14 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
     auto input = Identity("");
     auto hiddenState = Memorize(seqLength, "");
     auto cellState = Memorize(seqLength, "");
-    auto add = Add(2, "");
+    auto add = Add("");
 
     // Forget gate
     auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(forgetGateX, 0, 0);
     auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(forgetGateH, 1, 0);
-    auto forgetGate = Add(2, "");
+    auto forgetGate = Add("");
     forgetGateX->addChild(forgetGate, 0, 0);
     forgetGateH->addChild(forgetGate, 0, 1);
     auto forgetGateAct = Sigmoid("");
@@ -165,7 +165,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
     input->addChild(inputGateX, 0, 0);
     auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(inputGateH, 1, 0);
-    auto inputGate = Add(2, "");
+    auto inputGate = Add("");
     inputGateX->addChild(inputGate, 0, 0);
     inputGateH->addChild(inputGate, 0, 1);
     auto inputGateAct = Sigmoid("");
@@ -179,7 +179,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
     input->addChild(cellCandidateX, 0, 0);
     auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(cellCandidateH, 1, 0);
-    auto cellCandidate = Add(2, "");
+    auto cellCandidate = Add("");
     cellCandidateX->addChild(cellCandidate, 0, 0);
     cellCandidateH->addChild(cellCandidate, 0, 1);
     auto cellCandidateAct = Tanh("");
@@ -191,7 +191,7 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
     input->addChild(outputGateX, 0, 0);
     auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(outputGateH, 1, 0);
-    auto outputGate = Add(2,"");
+    auto outputGate = Add("");
     outputGateX->addChild(outputGate, 0, 0);
     outputGateH->addChild(outputGate, 0, 1);
     auto outputGateAct = Sigmoid("");
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
index ef319ef38ad18de9eaed0a1d4a92c3877ee7cf8e..bcda67d0ce4c43e4936739affb9d681942062cb1 100644
--- a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -41,7 +41,7 @@ std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_
         AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
     });
 
-    return MetaOperator("PaddedAvgPooling", graph, name);
+    return MetaOperator(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
 }
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
@@ -75,7 +75,7 @@ inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<Dim
         AvgPooling(kernel_dims, "", stride_dims)
     });
 
-    return std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph);
+    return std::make_shared<MetaOperator_Op>(("PaddedAvgPooling" + std::to_string(DIM) + "D").c_str(), graph);
 }
 
 template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp
index 31b1c675e9d577002350ea11dd0b42601a91ef76..ca769026d900868d372bd7207ad616e07041e857 100644
--- a/src/operator/MetaOperatorDefs/PaddedConv.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp
@@ -43,7 +43,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "")
     });
-    auto metaOpNode = MetaOperator("PaddedConv", graph, name);
+    auto metaOpNode = MetaOperator(("PaddedConv" + std::to_string(DIM) + "D").c_str(), graph, {}, name);
     addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
     if (!no_bias) {
         addProducer(metaOpNode, 2, {out_channels}, "b");
@@ -63,7 +63,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op(
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
     auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
-    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
+    return std::make_shared<MetaOperator_Op>(("PaddedConv" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv}));
 }
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
index 1c073b78a61763b46e330089cccfcc4bced352a4..b68794fb9b1ce76ddc5bca8e7d697a7b98d9f141 100644
--- a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
@@ -40,7 +40,7 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t n
         Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
         std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv_depth_wise" : "")
     });
-    auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, name);
+    auto metaOpNode = MetaOperator(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), graph, {},name);
     addProducer(metaOpNode, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
     if (!no_bias) {
         addProducer(metaOpNode, 2, {nb_channels}, "b");
@@ -61,7 +61,7 @@ std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op(
     auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
     auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
 
-    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
+    return std::make_shared<MetaOperator_Op>(("PaddedConvDepthWise" + std::to_string(DIM) + "D").c_str(), Sequential({pad, conv}));
 }
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
 template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index f15a7dc3899a7bc864e8e76ff0946fb70584bf05..bd09e9d1297ec612b08634f59bfe33f0802ef3fd 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -65,20 +65,14 @@ void Aidge::Operator::resetConsummerProducer(){
     mImpl->prodConso()->resetConsummerProducer();
 }
 
-void Aidge::Operator::runHooks() const {
-    for (auto& hook : mHooks) {
-        hook.second->call();
-    }
-}
 void Aidge::Operator::forward() {
     AIDGE_ASSERT(mImpl != nullptr, "forward(): an implementation is required for {}!", type());
     mImpl->forward();
-    runHooks();
 }
 
 void Aidge::Operator::backward() {
     AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type());
-    mImpl->backward(); 
+    mImpl->backward();
 }
 
 void Aidge::Operator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) {
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index ff6fb9ce4b6b8596477dfdd1f43f8927e534459b..3bdb4b17127eb8a9115f8dec045db32bf041b00b 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -45,7 +45,8 @@ Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other)
 
 void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs());
-    AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type");
+    AIDGE_ASSERT(data != nullptr, "Undefined data argument, make sure that the associated tensor holds data before associating the input.")
+    AIDGE_ASSERT(data->type() == Tensor::Type, "OperatorTensor::associateInput(): Input data must be of Tensor type, got {}", data->type());
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
 }
 
@@ -87,7 +88,7 @@ std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawOutput(const Aidge::IO
 }
 
 const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const {
-    AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
+    AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs, asked for output#{}", type(), nbOutputs(), outputIdx);
     return mOutputs[outputIdx];
 }
 
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index 39f61e328bd3f98bc836604462bbfc064fbb93be..a0b5f2df52e373bd92dd57cc621318f2abbb45c9 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -20,7 +20,7 @@
 #include "aidge/utils/Types.h"
 
 template <Aidge::DimIdx_t DIM>
-const std::string Aidge::Pad_Op<DIM>::Type = "Pad";
+const std::string Aidge::Pad_Op<DIM>::Type = "Pad" + std::to_string(DIM) + "D";
 
 template <Aidge::DimIdx_t DIM>
 std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const {
@@ -34,9 +34,9 @@ bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
         for (std::size_t dim = 0; dim < DIM; ++dim) {
-            outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+            outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[dim]
                                 + inputDims[dim+2]
-                                + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+                                + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[DIM+dim];
         }
         outputDims[1] = inputDims[1];
         outputDims[0] = inputDims[0];
@@ -61,16 +61,18 @@ std::set<std::string> Aidge::Pad_Op<DIM>::getAvailableBackends() const {
 template class Aidge::Pad_Op<1>;
 template class Aidge::Pad_Op<2>;
 
+////////////////////////////////////////////////////////////////////////////////
+
 template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Aidge::Node> Aidge::Pad(const std::array<Aidge::DimSize_t, 2*DIM> &beginEndTuples,
                                            const std::string& name,
-                                           const PadBorderType &borderType,
+                                           PadBorderType borderType,
                                            double borderValue)
 {
     AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", Pad_Op<DIM>::Type);
     return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
 }
 
-template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
-template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
-template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, PadBorderType, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, PadBorderType, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, PadBorderType, double borderValue);
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index cd5b18759cdd743f292054bca91ffee5da722ea6..fa77d18e7e3c5b30466304e04cf2ad95affce20e 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -24,6 +24,7 @@ Aidge::Elts_t Aidge::Pop_ProdConso::getNbRequiredData(const Aidge::IOIndex_t inp
     assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+    AIDGE_ASSERT(!op.getInput(inputIdx)->empty(), "Pop operator requires known, non-empty, input dims for scheduling. You might have an unresolved data dependency upstream in the computing graph.");
     return Elts_t::DataElts(op.getInput(inputIdx)->size()
         / op.getInput(inputIdx)->dims()[0]);
 }
@@ -32,7 +33,7 @@ void Aidge::Pop_OpImpl::forward() {
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
 
     assert(op.getInput(0) && "missing input #0");
-    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}).clone();
 }
 
 //////////////////////////////////////////////////////////
@@ -93,7 +94,7 @@ std::set<std::string> Aidge::Pop_Op::getAvailableBackends() const {
 }
 
 void Aidge::Pop_Op::forward() {
-    Operator::forward();
+    OperatorTensor::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
 
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index fdba4ac2e22d857a31779df2e5ff789c3eb92f5c..9af4586886fc98c50862672392d3b704e6bc1d0c 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -44,7 +44,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
         attr<ProdAttr::Constant>(constant)))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
-    if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
+    if (mOutputs[0] && mOutputs[0]->hasImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
         SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
     }
     else {
@@ -61,7 +61,7 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
       mAttributes(op.mAttributes)
 {
-    mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
+    *mOutputs[0] = *(op.getOutput(0));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
         SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
     }
@@ -71,7 +71,12 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
 }
 
 std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
-    return std::make_shared<Producer_Op>(*this);
+    // mOutput cannot be nullptr because of OperatorTensor constructor
+    std::shared_ptr<Tensor> newTensor = std::make_shared<Tensor>(mOutputs[0]->clone());
+
+    std::shared_ptr<Producer_Op> newOp = std::make_shared<Producer_Op>(newTensor, constant());
+
+    return newOp;
 }
 
 void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -92,8 +97,6 @@ void Aidge::Producer_Op::forward() {
     if (!backend().empty()) {
         mImpl->forward();
     }
-
-    runHooks();
 }
 
 void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const {
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 9e5762452e382a31c1e5da25708507653da2e474..252f55a6abdea13cc43cf21d3e8c7ab33ddbb86e 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -11,56 +11,31 @@
 
 #include "aidge/operator/Resize.hpp"
 
-#include <cstddef>    // std::size_t
-#include <cstdint>    // std::int64_t
-#include <stdexcept>  // std::runtime_error
+#include <algorithm>
+#include <cstddef>   // std::size_t
+#include <cstdint>   // std::int64_t
+#include <fmt/core.h>
+#include <stdexcept> // std::runtime_error
 #include <string>
 #include <vector>
-#include <fmt/core.h>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Interpolation.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-const std::string Aidge::Resize_Op::Type = "Resize";
-
-Aidge::Resize_Op::Resize_Op()
-    : OperatorTensor(Type,
-        {InputCategory::Data,
-            InputCategory::OptionalData,
-            InputCategory::OptionalData,
-            InputCategory::OptionalData},
-        1) {}
-
-/**
- * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
- * but not its input tensors (the new operator has no input associated).
- * @param op Operator to copy.
- */
-
-Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
-    : OperatorTensor(op)
-{
-    if (!op.backend().empty()) {
-        SET_IMPL_MACRO(Resize_Op, *this, op.backend());
-    }
-    else {
-        mImpl = nullptr;
-    }
-}
+namespace Aidge {
 
-std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const {
-    return std::make_shared<Resize_Op>(*this);
-}
+const std::string Resize_Op::Type = "Resize";
 
-bool Aidge::Resize_Op::dimsForwarded() const {
+bool Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
-    if ((getInput(1) && !getInput(1)->undefined())
-        || (getInput(2) && !getInput(2)->undefined())
-        || (getInput(3) && !getInput(3)->undefined())
-        )
-    {
+    if ((getInput(1) && !getInput(1)->undefined()) ||
+        (getInput(2) && !getInput(2)->undefined()) ||
+        (getInput(3) && !getInput(3)->undefined())) {
         // output dims are data dependent
         return false;
     }
@@ -68,93 +43,137 @@ bool Aidge::Resize_Op::dimsForwarded() const {
     return OperatorTensor::dimsForwarded();
 }
 
-bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
-    if (inputsAssociated()) {
-        AIDGE_ASSERT(getInput(0)->nbDims() == 4,
-            "input tensor must have dimensions = 4 (batch, channel, height, width).");
-
-        const bool input1ROIPresent           = getInput(1) && !getInput(1)->undefined();
-        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->undefined();
-        const bool input3SizesPresent         = getInput(3) && !getInput(3)->undefined();
+bool Resize_Op::forwardDims(bool allowDataDependency) {
+    if (!allowDataDependency) {
+        Log::warn("{}: cannot execute forwardDims() as the output "
+                    "dimensions are computed from some input data.",
+                    type());
+        return false;
+    }
 
-        AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of scales and  sizes can be specified.")
+    // Some optional input may be linked but undefined because of ONNX import
+    if (!inputsAssociated(false)) {
+        return false;
+    }
 
-        if (input1ROIPresent) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is given and it is not supported.");
+    /** @brief input #0 */
+    constexpr IOIndex_t inDataIdx = 0;
+    /** @brief input #1 */
+    constexpr IOIndex_t inROIIdx = 1;
+    /** @brief input #2 */
+    constexpr IOIndex_t inScalesIdx = 2;
+    /** @brief input #3 */
+    constexpr IOIndex_t inSizesIdx = 3;
+
+    std::vector<DimSize_t> outDims = getInput(inDataIdx)->dims();
+    /////////////////////////////////////////////////////
+    // Ensuring operator is connected properly
+    const bool inputROIPresent =
+        getInput(inROIIdx) && !getInput(inROIIdx)->undefined();
+    if (inputROIPresent) {
+            AIDGE_THROW_OR_ABORT(
+                std::runtime_error,
+                "{}: input ROI(#{}) is present but it is not supported.",
+                type(),
+                inROIIdx);
         }
-        else if (input2ScalesPresent)  {
-            if (!allowDataDependency) {
-                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #2");
-                return false;
-            }
-
-            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(2)->size(),
-                "input #0 and input #2 (Scales) must have the same dimensions.");
-
-            std::vector<DimSize_t>      outDims = getInput(0)->dims();
-            const std::vector<DimSize_t> inDims = getInput(0)->dims();
 
-            std::shared_ptr<Tensor> fallback;
-            const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-
-            for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
-                outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
-            }
-
-            mOutputs[0]->resize(outDims);
-            return true;
+    const bool inputScalesPresent =
+        getInput(inScalesIdx) && !getInput(inScalesIdx)->undefined();
+    const bool inputSizesPresent =
+        getInput(inSizesIdx) && !getInput(inSizesIdx)->undefined();
+
+    AIDGE_ASSERT(inputScalesPresent ^ inputSizesPresent,
+                 "{}: Only one of the two inputs must be defined between input "
+                 "Scales(#2) "
+                 "and Sizes(#3). They cannot be specified at the same time.",
+                 type())
+
+    std::shared_ptr<Tensor> resizeParam = inputScalesPresent ? getInput(inScalesIdx) : getInput(inSizesIdx);
+    AIDGE_ASSERT(getInput(inDataIdx)->nbDims() == resizeParam->size(),
+        "{}: data input #0 and resizing parameter input #{} must have the "
+        "same dimensions.",
+        type(), inputScalesPresent ? inScalesIdx : inSizesIdx);
+
+
+    ////////////////////////////////////////////
+    // Case resize is done using Scales formula
+    if (inputScalesPresent) {
+
+        std::shared_ptr<Tensor> fallback;
+        const auto &scales =
+            resizeParam
+                ->refCastFrom(fallback,
+                              DataType::Float32,
+                              resizeParam->backend());
+
+        const std::vector<DimSize_t> inDims = getInput(inDataIdx)->dims();
+        for (std::size_t dim = 0; dim < getInput(inScalesIdx)->size(); ++dim) {
+            const auto scaleAlongDim = scales.get<cpptype_t<DataType::Float32>>(dim);
+            AIDGE_ASSERT(scaleAlongDim > 0,
+                         "{}: all scale values must be strictly positive, "
+                         "got {}.",
+                         type(),
+                         scaleAlongDim);
+            outDims[dim] =
+                static_cast<DimSize_t>(inDims[dim] * scaleAlongDim);
         }
-        else if (input3SizesPresent) {
-            if (!allowDataDependency) {
-                Log::warn("Resize_Op: cannot execute forwardDims() as the output dimensions depend on the input #3");
-                return false;
-            }
-
-            AIDGE_ASSERT(getInput(0)->nbDims() ==  getInput(3)->size(),
-                "input #0 and input #3 (Sizes) must have the same dimensions.");
-
-            std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-            std::shared_ptr<Tensor> fallback;
-            const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-
-            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
-                outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
-            }
-
-            mOutputs[0]->resize(outDims);
-            return true;
-        }
-        else {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: Either Input #2 or Input #3 must be present.");
+        ///////////////////////////////////////////////////////////////
+        // case where resize output dims are given via the Size input
+    } else {
+        std::shared_ptr<Tensor> fallback;
+        const auto &sizes = resizeParam
+                                ->refCastFrom(fallback,
+                                              NativeType<DimSize_t>::type,
+                                              resizeParam->backend());
+
+        for (std::size_t dim = 0; dim < getInput(inSizesIdx)->size(); ++dim) {
+            outDims[dim] = sizes.get<DimSize_t>(dim);
         }
     }
-
-    return false;
+    mOutputs[0]->resize(outDims);
+    return true;
 }
 
-void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+void Resize_Op::setBackend(const std::string &name, DeviceIdx_t device) {
     SET_IMPL_MACRO(Resize_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 
-    // By default, automatically set backend for all inputs: roi, scales and sizes
-    if(getInput(1)) {
+    // By default, automatically set backend for all optional inputs: roi, scales and
+    // sizes
+    if (getInput(1)) {
         getInput(1)->setBackend(name, device);
     }
-    if(getInput(2)) {
+    if (getInput(2)) {
         getInput(2)->setBackend(name, device);
     }
-    if(getInput(3)) {
+    if (getInput(3)) {
         getInput(3)->setBackend(name, device);
     }
 }
 
-std::set<std::string> Aidge::Resize_Op::getAvailableBackends() const {
-    return Registrar<Resize_Op>::getKeys();
-}
-
-/////////////////////////////////////////////
+std::shared_ptr<Node>
+Resize(std::vector<float> scale,
+        std::vector<std::size_t> size,
+       Interpolation::CoordinateTransformation coordTransfoMode,
+       Interpolation::Mode interpolMode,
+       float cubicCoefA,
+       const std::string &name) {
+    std::shared_ptr<Node> node_resize = std::make_shared<Node>(std::make_shared<Resize_Op>(coordTransfoMode,
+                                                              interpolMode,
+                                                              cubicCoefA),
+                                  name);
+    if (scale.size()) {
+        std::shared_ptr<Node> prod_scale = Producer(std::make_shared<Tensor>(Vector<float>(scale)));
+        prod_scale->addChild(node_resize, 0, 2);
+    }
+    if (size.size())
+    {
+        std::shared_ptr<Node> prod_size = Producer(std::make_shared<Tensor>(Vector<std::size_t>(size)));
+        prod_size->addChild(node_resize, 0, 3);
+    }
+    return node_resize;
 
-std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
-    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
-}
\ No newline at end of file
+}
+} // namespace Aidge
diff --git a/src/operator/Round.cpp b/src/operator/Round.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ba4eff9d1e1cf06cc5a4bbda54010aec8c2f2f63
--- /dev/null
+++ b/src/operator/Round.cpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Round.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Round_Op::Type = "Round";
+
+Aidge::Round_Op::Round_Op(const Aidge::Round_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Round_Op, *this, op.backend());
+    }else{
+        mImpl = nullptr;
+    }
+}
+
+
+std::shared_ptr<Aidge::Operator> Aidge::Round_Op::clone() const {
+    return std::make_shared<Round_Op>(*this);
+}
+
+void Aidge::Round_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    mImpl = Registrar<Round_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Round_Op::getAvailableBackends() const {
+    return Registrar<Round_Op>::getKeys();
+}
+
+std::shared_ptr<Aidge::Node> Aidge::Round(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Round_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index 5ac08cd2245e0caa3ca7072c70ccc69bcfcf9558..268a14cf9759a6e03302680814778da4804dcc19 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -18,6 +18,10 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+
+// Caution: This operator is now deprecated and should no longer be used.
+//It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
+
 const std::string Aidge::Scaling_Op::Type = "Scaling";
 
 Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
@@ -26,12 +30,15 @@ Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOu
         attr<ScalingAttr::ScalingFactor>(scalingFactor),
         attr<ScalingAttr::QuantizedNbBits>(nbBits),
         attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
-{}
+{
+    Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used.\nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
+} 
 
 Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
     : OperatorTensor(op),
     mAttributes(op.mAttributes)
 {
+    Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used.\nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
     if (op.mImpl){
         SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
     } else {
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 29a9ee6252a0c2baa6e07bc56e60650685db6bdd..ecaa12191173ac74ace8d6d224ddfe08469eb521 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -20,14 +20,8 @@
 #include "aidge/utils/Types.h"
 
 void Aidge::Shape_OpImpl::forward() {
-    const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
-    const auto start = op.start();
-    const auto end = op.end();
-
-    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(),
-                                                   start),
-                                         DataType::UInt64,
-                                         end - start + 1);
+    // Do nothing...
+    // Output is already valid after forwardDims()
 }
 
 ///////////////////////////////////////////////
@@ -72,9 +66,14 @@ bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
         const DimSize_t roi = end - start + 1;
 
         AIDGE_ASSERT(start < nbDims && end < nbDims, "'start' and 'end' must be < {}", nbDims);
-        AIDGE_ASSERT(roi> 1, "Unvalid ROI for Shape");
+        AIDGE_ASSERT(roi> 1, "Invalid ROI for Shape");
 
         mOutputs[0]->resize({roi});
+        // Ensure the output of this operator is valid after forwardDims():
+        mOutputs[0]->getImpl()->copyCast(std::next(getInput(0)->dims().data(),
+                                                    start),
+                                            DataType::UInt64,
+                                            end - start + 1);
         return true;
     }
 
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index e3ed13588d8c2b5ddde91d37fc926d675f0666a3..2191f14a150088dfa1d369d2ef31051e5ab16326 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -43,7 +43,7 @@ void Aidge::Split_OpImpl::forward() {
         {
             // Compute chunk position in input tensor
             DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
-            // Copy chunk in ouput
+            // Copy chunk in output
             op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
                                             splits[i] * stride_post, offset);
             offset += splits[i] * stride_post;
@@ -124,7 +124,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
         // Fill Split attr if empty
         if(this->split().empty()) {
             // In case the input Split is not provided, divide the dimension of Axis into equal slices
-            AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} musn't be bigger than dimension {}.", nbOutput, dimToSplit);
+            AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} mustn't be bigger than dimension {}.", nbOutput, dimToSplit);
             DimSize_t baseSliceSize = dimToSplit / nbOutput;
 
             DimSize_t remainder = dimToSplit % nbOutput;
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index a8b20d21ae1f6c7bfba1a9e52d039f292b6aa62e..b51b4f346c92f778fe0a044df187cd8d0d0f7304 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -102,7 +102,7 @@ bool Squeeze_Op::forwardDims(bool allowDataDependency) {
                        axis < static_cast<int8_t>(input_dims.size()),
                    "{} : Axis index OutOfBounds error, expected value "
                    "within size limits of input tensor : "
-                   "[-{},{}), got {}.",
+                   "[-{},{}], got {}.",
                    type(), input_dims.size(), input_dims.size() - 1, axis);
       auto temp =
           static_cast<DimIdx_t>(axis >= 0 ? axis : axis + input_dims.size());
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ab9ddc4f705cb00cebbe5b9ee68fb1433586a043
--- /dev/null
+++ b/src/operator/Stack.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Stack.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// TODO: Check why getRequiredMemory is always called with empty vector as
+// inputSize
+Elts_t StackProdConso::getRequiredMemory(
+    const Aidge::IOIndex_t inputIdx,
+    const std::vector<DimSize_t> &/*inputsSize*/) const {
+    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    // The produced data after one forward pass is simply the input size,
+    // we do not produce the whole output tensor every time.
+    if (op.forwardStep() <= op.maxElements()) {
+        return Elts_t::DataElts(op.getInput(inputIdx)->size());
+    } else {
+        return Elts_t::NoneElts();
+    }
+}
+
+void StackProdConso::resetConsummerProducer() {
+    ProdConso::resetConsummerProducer();
+
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    op.forwardStep() = 0;
+}
+
+const std::string StackOp::s_type = "Stack";
+
+void StackOpImpl::forward() {
+    const StackOp &op = dynamic_cast<const StackOp &>(mOp);
+    AIDGE_ASSERT(op.getInput(0), "missing input #0");
+    AIDGE_ASSERT((op.forwardStep() < op.maxElements()),
+                 "cannot forward anymore, maximum number of elements to stack "
+                 "exceeded");
+
+    op.getOutput(0)->getImpl()->copy(
+        op.getInput(0)->getImpl()->rawPtr(),
+        op.getInput(0)->size(),
+        op.forwardStep() * op.getInput(0)->size());
+}
+
+StackOp::StackOp(std::uint32_t maxElements)
+    : OperatorTensor(s_type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+          attr<StackAttr::MaxElements>(maxElements),
+          attr<StackAttr::ForwardStep>(0))) {
+    mImpl = std::make_shared<StackOpImpl>(*this);
+}
+
+StackOp::StackOp(const Aidge::StackOp &op)
+    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(StackOp, *this, op.backend());
+    } else {
+        mImpl = std::make_shared<StackOpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::StackOp::clone() const {
+    return std::make_shared<StackOp>(*this);
+}
+
+bool Aidge::StackOp::dimsForwarded() const {
+    if ((getInput(1) && !getInput(1)->undefined()))
+    {
+        // output dims are data dependent
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::StackOp::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        // Copy optional input #1 first dimension, if present, to attribute MaxElements
+        if (getInput(1)) {
+            if (!allowDataDependency) {
+                Log::warn("StackOp: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& maxElements = getInput(1)->refCastFrom(fallback, NativeType<std::uint32_t>::type, "cpu");
+            AIDGE_ASSERT(maxElements.size() > 0, "Input#1 size should be > 0");
+            this->maxElements() = static_cast<std::uint32_t*>(maxElements.getImpl()->hostPtr())[0];
+        }
+
+        AIDGE_ASSERT(this->maxElements() > 0, "Input#1 first element or MaxElements attribute should be > 0");
+
+        auto inputDims = getInput(0)->dims();
+        inputDims.insert(inputDims.begin(), maxElements());
+        getOutput(0)->resize(inputDims);
+        return true;
+    }
+
+    return false;
+}
+
+void StackOp::setBackend(const std::string &name, DeviceIdx_t device) {
+    if (Registrar<StackOp>::exists({name})) {
+        SET_IMPL_MACRO(StackOp, *this, name);
+    } else {
+        mImpl = std::make_shared<StackOpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> StackOp::getAvailableBackends() const {
+    return Registrar<StackOp>::getKeys();
+}
+
+void StackOp::forward() {
+    Operator::forward();
+    ++forwardStep();
+}
+
+std::shared_ptr<Node> Stack(std::uint32_t maxElements,
+                            const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<StackOp>(maxElements),
+                                  name);
+}
+} // namespace Aidge
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 0cb1717f1c96c393b8845db129eee1429966cd98..d24b9c90927830db6f1c1256d133d871ba3dc37f 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -59,6 +59,15 @@ std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const {
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars.");
+        // If permutation vector is not given, reverse the dims of input tensor
+        if (outputDimsOrder().empty())
+        {
+            this->outputDimsOrder().resize(getInput(0)->nbDims());
+            std::iota(this->outputDimsOrder().rbegin(), this->outputDimsOrder().rend(), 0);
+        }
+
+        AIDGE_ASSERT(outputDimsOrder().size() == getInput(0)->nbDims(),
+                     "Permutation vector must have the same rank as input tensor.");
         std::vector<DimSize_t> outputDims;
         for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
             outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
@@ -86,6 +95,6 @@ std::set<std::string> Aidge::Transpose_Op::getAvailableBackends() const {
 //////////////////////////////////////////////////
 
 std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
-                                           const std::string& name) {
+                                              const std::string& name) {
     return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
 }
\ No newline at end of file
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index 43afd160e03395c65c4dcbe5504cb865da4ed8d8..f3353b45cd6a732fa456ea0585ec5d040d53ef31 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -80,7 +80,7 @@ bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
                      axis < static_cast<int8_t>(output_nb_dims),
                  "{} : Axis index OutOfBounds enrror, expected value "
                  "within size limits of input tensor : "
-                 "[-{},{}), got {}.",
+                 "[-{},{}], got {}.",
                  type(), output_nb_dims, output_nb_dims - 1, axis);
     axes_rectified_idx.push_back(
         static_cast<DimIdx_t>(axis >= 0 ? axis : axis + output_nb_dims));
diff --git a/src/recipes/ConvToMatMul.cpp b/src/recipes/ConvToMatMul.cpp
index 9b88ffc73204b44cf857213d1fdfff49b3191f73..70be33932295aab49653bdc2853f4411ded919b4 100644
--- a/src/recipes/ConvToMatMul.cpp
+++ b/src/recipes/ConvToMatMul.cpp
@@ -24,7 +24,7 @@
 #include "aidge/recipes/Recipes.hpp"
 
 size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
-    const auto matches = SinglePassGraphMatching(graphView).match("Conv");
+    const auto matches = SinglePassGraphMatching(graphView).match("Conv2D");
 
     size_t nbReplaced = 0;
     for (const auto& match : matches) {
@@ -75,7 +75,7 @@ size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
 
         // Handle bias
         if (convOp->getInput(2) && !convOp->getInput(2)->empty()) {
-            auto add = Add(2, (!convNode->name().empty()) ? convNode->name() + "_add" : "");
+            auto add = Add((!convNode->name().empty()) ? convNode->name() + "_add" : "");
             auto bReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{1, static_cast<int64_t>(convOp->getInput(2)->size()), 1, 1}}),
                 (!convNode->name().empty()) ? convNode->name() + "_b_reshape_shape_prod" : "",
                 true);
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 34722c19f8c0fddaffa7357136f1512a027e1617..50c8f561c1732d6f7f37ae5b8d6f03c4e135939c 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -37,7 +37,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         const std::shared_ptr<GraphView>  metaOpGraph = metaOp -> getMicroGraph();
         const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> outputNodes = metaOpGraph -> getOrderedOutputs();
         if (outputNodes.size() != 1) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Bad MetaOperator argument for fuseBatchNorm recipie.");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Bad MetaOperator argument for fuseBatchNorm recipe.");
         }
         convNode = outputNodes[0].first;
     }
@@ -99,7 +99,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         Log::notice("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
-    // Add bias if it is non existant, as there will be a bias after the fuse
+    // Add bias if it is non existent, as there will be a bias after the fuse
     if (!convOp->getInput(2)) {
         if (metaNode) {
             // Conv is inside a meta-operator, we add bias outside it
@@ -190,9 +190,10 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
 }
 
 void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
-    auto matches = SinglePassGraphMatching(graphView).match("(Conv|ConvDepthWise|PaddedConv|PaddedConvDepthWise)->BatchNorm");
+    auto matches = SinglePassGraphMatching(graphView).match("(Conv2D|ConvDepthWise2D|PaddedConv2D|PaddedConvDepthWise2D)->BatchNorm2D");
 
     for (auto match : matches) {
+        fmt::println("Match !");
         auto rootNode = match.graph->rootNode();
         fuseBatchNorm(rootNode, *rootNode->getChildren().begin());
     }
diff --git a/src/recipes/FuseToMetaOps.cpp b/src/recipes/FuseToMetaOps.cpp
index 0ad5e5a1da0e6aef74f7e47751dd2d4e8648980b..ac6536d7e7ec89c5eb1b9efb0c301cfa979739cf 100644
--- a/src/recipes/FuseToMetaOps.cpp
+++ b/src/recipes/FuseToMetaOps.cpp
@@ -17,9 +17,9 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/recipes/Recipes.hpp"
 
-size_t Aidge::fuseToMetaOps(std::shared_ptr<GraphView> graphView, const std::string& query, const std::string& type) {
+size_t Aidge::fuseToMetaOps(SinglePassGraphMatching& gm, const std::string& query, const std::string& type) {
     const auto metaType = (!type.empty()) ? type : query;
-    const auto matches = SinglePassGraphMatching(graphView).match(query);
+    const auto matches = gm.match(query);
 
     size_t nbReplaced = 0;
     for (const auto& match : matches) {
@@ -48,3 +48,8 @@ size_t Aidge::fuseToMetaOps(std::shared_ptr<GraphView> graphView, const std::str
     Log::info("Replaced {} (out of {}) matching sub-graph with meta operators", nbReplaced, matches.size());
     return nbReplaced;
 }
+
+size_t Aidge::fuseToMetaOps(std::shared_ptr<GraphView> graphView, const std::string& query, const std::string& type) {
+    SinglePassGraphMatching gm(graphView);
+    return fuseToMetaOps(gm, query, type);
+}
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index 9522c0fe7346e78875a08d3ebf19a04dea2909e1..bbc6524ccbe7158476a6bd846d77da754f186091 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -16,17 +16,20 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 
 
-std::set<std::shared_ptr<Aidge::Tensor>> Aidge::producers(std::shared_ptr<Aidge::GraphView> graphview) {
+std::set<std::shared_ptr<Aidge::Tensor>> Aidge::producers(std::shared_ptr<Aidge::GraphView> graphview, bool constant) {
     std::set<std::shared_ptr<Tensor>> res;
     const auto& nodes = graphview->getNodes();
     for (const auto& node : nodes) {
         if (node->type() == "Producer") {
-            const auto& param = std::static_pointer_cast<OperatorTensor>(node->getOperator());
-            res.insert(param->getOutput(0));
+            const auto& producer = std::static_pointer_cast<Producer_Op>(node->getOperator());
+            if (!producer->constant() || constant) {
+                res.insert(producer->getOutput(0));
+            }
         }
     }
     return res;
diff --git a/src/recipes/MatMulTiling.cpp b/src/recipes/MatMulTiling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cfc0b191a2f8b47aec92d1dec5ca8a44c95db5db
--- /dev/null
+++ b/src/recipes/MatMulTiling.cpp
@@ -0,0 +1,131 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include <cassert>
+#include <memory>
+#include <set>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Slice.hpp"
+#include "aidge/operator/Identity.hpp"
+#include "aidge/operator/Concat.hpp"
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+// see https://en.wikipedia.org/wiki/Matrix_multiplication_algorithm
+void Aidge::matMulTiling(NodePtr matMul, const std::vector<DimSize_t>& maxDims) {
+    if (matMul->getOperator()->type() != "MatMul") {
+        AIDGE_INTERNAL_ASSERT("Operator should be a MatMul.");
+    }
+    AIDGE_ASSERT(matMul->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+    const auto& op = std::static_pointer_cast<OperatorTensor>(matMul->getOperator());
+    if (!op->dimsForwarded()) {
+        AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling");
+    }
+
+    const auto& in0Tensor = op->getInput(0);
+    const auto& in1Tensor = op->getInput(1);
+    const auto& outTensor = op->getOutput(0);
+    const auto& input0Dims = in0Tensor->dims();
+    const auto& input1Dims = in1Tensor->dims();
+    const auto& outputDims = outTensor->dims();
+    const auto& outputMatDims = std::vector<std::size_t>(outputDims.end() - 2, outputDims.end());;
+
+    if (outputMatDims[0] > maxDims[0] || outputMatDims[1] > maxDims[1]) {
+        const auto sliceDims = (outputMatDims[0] > maxDims[0]) ? input0Dims : input1Dims;
+        std::int32_t axis;
+        std::int64_t splitIndex0_end = static_cast<std::int64_t>(sliceDims.end()[-2]);
+        std::int64_t splitIndex0_start = 0;
+        std::int64_t splitIndex1_end = static_cast<std::int64_t>(sliceDims.end()[-1]);
+        std::int64_t splitIndex1_start = 0;
+
+        if (outputMatDims[0] > maxDims[0]) {
+            splitIndex0_end = maxDims[0];
+            splitIndex0_start = maxDims[0];
+            axis = -2;
+        }
+        else {
+            splitIndex1_end = maxDims[1];
+            splitIndex1_start = maxDims[1];
+            axis = -1;
+        }
+
+        auto identity0 = Identity();
+        auto sliceX0 = Slice();
+        auto sliceX0_starts = Producer(std::make_shared<Tensor>(Vector<std::int64_t>{{0, 0}}), "", true);
+        sliceX0_starts->addChild(sliceX0, 0, 1);
+        auto sliceX0_ends = Producer(std::make_shared<Tensor>(Vector<std::int64_t>{{splitIndex0_end, splitIndex1_end}}), "", true);
+        sliceX0_ends->addChild(sliceX0, 0, 2);
+        auto sliceX0_axes = Producer(std::make_shared<Tensor>(Vector<std::int8_t>{{-2, -1}}), "", true);
+        sliceX0_axes->addChild(sliceX0, 0, 3);
+        auto sliceX0_steps = Producer(std::make_shared<Tensor>(Vector<std::int64_t>{{1, 1}}), "", true);
+        sliceX0_steps->addChild(sliceX0, 0, 4);
+        auto matMulX0 = MatMul();
+        auto identity1 = Identity();
+        auto sliceX1 = Slice();
+        auto sliceX1_starts = Producer(std::make_shared<Tensor>(Vector<std::int64_t>{{splitIndex0_start, splitIndex1_start}}), "", true);
+        sliceX1_starts->addChild(sliceX1, 0, 1);
+        auto sliceX1_ends = Producer(std::make_shared<Tensor>(Vector<std::int64_t>{{static_cast<std::int64_t>(sliceDims.end()[-2]), static_cast<std::int64_t>(sliceDims.end()[-1])}}), "", true);
+        sliceX1_ends->addChild(sliceX1, 0, 2);
+        auto sliceX1_axes = Producer(std::make_shared<Tensor>(Vector<std::int8_t>{{-2, -1}}), "", true);
+        sliceX1_axes->addChild(sliceX1, 0, 3);
+        auto sliceX1_steps = Producer(std::make_shared<Tensor>(Vector<std::int64_t>{{1, 1}}), "", true);
+        sliceX1_steps->addChild(sliceX1, 0, 4);
+        auto matMulX1 = MatMul();
+        auto concat = Concat(2, axis);
+
+        if (outputMatDims[0] > maxDims[0]) {
+            identity0->addChild(sliceX0, 0, 0);
+            identity0->addChild(sliceX1, 0, 0);
+            identity1->addChild(matMulX0, 0, 1);
+            identity1->addChild(matMulX1, 0, 1);
+            sliceX0->addChild(matMulX0, 0, 0);
+            sliceX1->addChild(matMulX1, 0, 0);
+        }
+        else {
+            identity0->addChild(matMulX0, 0, 0);
+            identity0->addChild(matMulX1, 0, 0);
+            identity1->addChild(sliceX0, 0, 0);
+            identity1->addChild(sliceX1, 0, 0);
+            sliceX0->addChild(matMulX0, 0, 1);
+            sliceX1->addChild(matMulX1, 0, 1);
+        }
+    
+        matMulX0->addChild(concat, 0, 0);
+        matMulX1->addChild(concat, 0, 1);
+
+        auto gMatMul = std::make_shared<GraphView>();
+        gMatMul->add({matMul});
+
+        auto g = std::make_shared<GraphView>();
+        g->add({identity0});
+        g->add({identity1});
+        g->add({sliceX0, sliceX0_starts, sliceX0_ends, sliceX0_axes, sliceX0_steps, matMulX0, matMulX1, sliceX1, sliceX1_starts, sliceX1_ends, sliceX1_axes, sliceX1_steps, concat});
+
+        auto replaced = GraphView::replace(gMatMul, g);
+
+        if (replaced) {
+            g->forwardDims({}, true);
+
+            // Recursive tiling
+            matMulTiling(matMulX1, maxDims);
+            matMulTiling(matMulX0, maxDims);
+        }
+        else {
+            Log::warn("Unable to split MatMul {}", matMul->name());
+        }
+    }
+}
diff --git a/src/recipes/MatMulToFC.cpp b/src/recipes/MatMulToFC.cpp
index 9b5addd3bb971b3f61980a582d4cce6435c57219..8d902c680b8fa0d30a873b6f355734ce19d608f5 100644
--- a/src/recipes/MatMulToFC.cpp
+++ b/src/recipes/MatMulToFC.cpp
@@ -34,7 +34,7 @@ void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
 
 
     // Step 1 : Create FC
-    // Fetch the output dimension throught the bias size
+    // Fetch the output dimension through the bias size
     std::shared_ptr<Node> bias = nullptr;
     if (addNode) {
         if (addNode->getParent(0) == matmulNode) {
@@ -76,7 +76,7 @@ void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     }
     AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
 
-    // Instanciate FC
+    // Instantiate FC
     std::string fcName = matmulNode->name();
     if (addNode && !addNode->name().empty()) {
         fcName += "_" + addNode->name();
diff --git a/src/recipes/RemoveNode.cpp b/src/recipes/RemoveNode.cpp
index a09c67991409dfe491d46b4ad739f9ddf5b72aef..3a1bac588ee8a1bb38f74fee441c9eff07b4ef6e 100644
--- a/src/recipes/RemoveNode.cpp
+++ b/src/recipes/RemoveNode.cpp
@@ -13,24 +13,15 @@
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
 #include "aidge/recipes/Recipes.hpp"
 
-
-//Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
-
 size_t Aidge::removeNode(std::shared_ptr<GraphView> graphView, const std::string& type, bool incProducers) {
-    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-    regex->setNodeKey(type, "getType($) =='" + type + "'");
-    regex->addQuery(type + "#");
-
-    const auto matches = regex->match(graphView);
-    for (const auto& solution : matches) {
-        assert(solution->at(type).size() == 1 && "Wrong number of nodes to replace\n");
-
-        std::set<NodePtr> nodesToRemove = solution->at(type);
+    auto matches = SinglePassGraphMatching(graphView).match(type);
+    for (const auto& match : matches) {
+        std::set<NodePtr> nodesToRemove = {match.graph->rootNode()};
         if (incProducers) {
-            for (const auto& nodePtr: (*solution->at(type).begin())->getParents()) {
+            for (const auto& nodePtr: match.graph->rootNode()->getParents()) {
                 if (nodePtr != nullptr && nodePtr->type() == "Producer") {
                     nodesToRemove.insert(nodePtr);
                 }
diff --git a/src/scheduler/MemoryManager.cpp b/src/scheduler/MemoryManager.cpp
index 6fe0d1f0745a464b8fd61bf634d7105b9d22faf8..ba805f919a607e0b2ae3272d173aa11360548fa7 100644
--- a/src/scheduler/MemoryManager.cpp
+++ b/src/scheduler/MemoryManager.cpp
@@ -898,7 +898,7 @@ Aidge::MemoryManager::getMaxHole(std::shared_ptr<MemorySpace> memSpace) const
                     std::make_pair((*itPlane).allocated, holeSize));
 
                 if (!newInsert) {
-                    // Another plane exists at the same time, one must substract
+                    // Another plane exists at the same time, one must subtract
                     // the size of this other plane from the hole size
                     (*it).second = std::max(0, static_cast<int>((*it).second)
                         - static_cast<int>((*itPlane).getContiguousSize())
diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp
index 2b9a1f5b62741d5f08dfc3e5aa45b1102d54b850..2a44dd49f961bdcdf965a33d2ffe91f3ed8ae352 100644
--- a/src/scheduler/ParallelScheduler.cpp
+++ b/src/scheduler/ParallelScheduler.cpp
@@ -46,7 +46,7 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::
 
     const auto namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})");
 
-    // Sort static scheduling, the order will be the prefered threads scheduling
+    // Sort static scheduling, the order will be the preferred threads scheduling
     // order for non critical nodes
     std::deque<StaticSchedulingElement*> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end());
     std::stable_sort(staticSchedule.begin(), staticSchedule.end(),
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index f416fec2b63847c2ca293a5859361f436fe71ca9..2e9dc034ef0bf2b0be2ee27c26b1995a2d0e4244 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -70,22 +70,20 @@ std::vector<Aidge::Scheduler::StaticSchedulingElement*> Aidge::Scheduler::genera
     std::vector<StaticSchedulingElement*> schedule;
 
 
-    // 1) Initialize consumers list:
-    // 1.1) List of the GraphView's input nodes
-    std::set<std::shared_ptr<Node>> consumers = mGraphView->inputNodes();
-
-    // 1.2) List of nodes inside the GraphView connected to an inner Producer
+    // 1) Initialize consumers list: start from the output nodes and
+    // find the required prior producers/consumers at step 2).
+    // Beware that generateBaseScheduling() can be called multiple times
+    // with some node having already produced some data. In this case,
+    // we should always consume available data first. This is ensured
+    // by setting the consumers list to the output nodes and then recursively
+    // finding the dependencies.
+    // The initial list may contain producer nodes, in which case
+    // getPriorProducersConsumers() at step 2 will have moved them into
+    // the requiredProducers list.
+    std::set<std::shared_ptr<Node>> consumers = mGraphView->outputNodes();
     std::set<std::shared_ptr<Node>> producers;
-    for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) {
-        if (nodePtr->type() == Producer_Op::Type) {
-            for (const auto& child : nodePtr->getChildren()) {
-                // Do not schedule childs outside current graph!
-                if (mGraphView->inView(child)) {
-                    consumers.insert(child);
-                }
-            }
-        }
-    }
+    std::string level1Diagnostic;
+    std::string level2Diagnostic;
 
     do {
         // 2) From the current consumers list, check if any prior consumer node
@@ -148,22 +146,37 @@ std::vector<Aidge::Scheduler::StaticSchedulingElement*> Aidge::Scheduler::genera
         // there is multiple successive priors for example).
         std::set<std::shared_ptr<Node>> runnableConsumers;
         Log::debug("Updated list of consumers:");
+        level1Diagnostic.clear();
+        level2Diagnostic.clear();
         for (const auto& consumer : consumers) {
             summarizeConsumerState(consumer, namePtrTable.at(consumer));  // debug print
 
             bool isRunnable = true;
             for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
                 AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
+                AvailableDataStatus status;
 
                 if ((consumer->getOperator()->getNbConsumedData(inputIdx) + consumer->getOperator()->getNbRequiredData(inputIdx)) >
-                            getNbAvailableData(consumer, inputIdx)) {
+                            getNbAvailableData(consumer, inputIdx, status)) {
                     Log::debug("  not runnable: C{} + R{} > P{} for input #{}",
                         consumer->getOperator()->getNbConsumedData(inputIdx),
                         consumer->getOperator()->getNbRequiredData(inputIdx),
-                        getNbAvailableData(consumer, inputIdx), inputIdx);
+                        getNbAvailableData(consumer, inputIdx, status), inputIdx);
 
                     // not enough data to run
                     isRunnable = false;
+                    if (status == Scheduler::AvailableDataStatus::UpperNodeInputFound
+                        || status == Scheduler::AvailableDataStatus::NotConnected)
+                    {
+                        level1Diagnostic += fmt::format("- No data available for node {} input #{}. {}\n", namePtrTable.at(consumer), inputIdx, fmt::styled(status, fmt::fg(fmt::color::red)));
+                    }
+                    else {
+                        level2Diagnostic += fmt::format("- No data available for node {} input #{}. {}\n", namePtrTable.at(consumer), inputIdx, fmt::styled(status, fmt::fg(fmt::color::green)));
+                        level2Diagnostic += fmt::format("  ↳ Available data is {}, but {} was already consumed and {} more is required.\n",
+                            getNbAvailableData(consumer, inputIdx, status),
+                            consumer->getOperator()->getNbConsumedData(inputIdx),
+                            consumer->getOperator()->getNbRequiredData(inputIdx));
+                    }
                     break;
                 }
             }
@@ -208,12 +221,13 @@ std::vector<Aidge::Scheduler::StaticSchedulingElement*> Aidge::Scheduler::genera
             for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
                 if (consumer->inputCategory(inputIdx) == InputCategory::Data) {
                     AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
+                    AvailableDataStatus status;
 
                     if (consumer->getOperator()->getNbConsumedData(inputIdx) <
-                                getNbAvailableData(consumer, inputIdx)) {
+                                getNbAvailableData(consumer, inputIdx, status)) {
                         Log::debug("  still consumer: C{} < P{} for input #{}",
                             consumer->getOperator()->getNbConsumedData(inputIdx),
-                            getNbAvailableData(consumer, inputIdx), inputIdx);
+                            getNbAvailableData(consumer, inputIdx, status), inputIdx);
 
                         // there is still data to consume
                         isStillConsumer = true;
@@ -297,7 +311,15 @@ std::vector<Aidge::Scheduler::StaticSchedulingElement*> Aidge::Scheduler::genera
             std::back_inserter(consumersName),
             [&namePtrTable](auto val){ return namePtrTable.at(val); });
 
-        Log::warn("Remaining consumers: {}. Possible dead-lock.", consumersName);
+        Log::warn("Remaining consumers: {}.", consumersName);
+
+        Log::info("Reasons:");
+        if (!level1Diagnostic.empty()) {
+            Log::info(level1Diagnostic);
+        }
+        else {
+            Log::info(level2Diagnostic);
+        }
     }
 
     return schedule;
@@ -307,19 +329,23 @@ std::vector<Aidge::Scheduler::StaticSchedulingElement*> Aidge::Scheduler::genera
 void Aidge::Scheduler::summarizeConsumerState(const std::shared_ptr<Aidge::Node>& consumer, const std::string& nodeName) const {
     Log::debug("\t- consumer: {}", fmt::styled(nodeName, fg(fmt::color::orange)));
     std::string crLog = "\t\tC/R:\t";
-    for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) {
-        crLog += fmt::format("{}/{}\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
-                consumer->getOperator()->getNbRequiredData(inId));
+    if (consumer->nbInputs() > 0) {
+        for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) {
+            crLog += fmt::format("{}/{}\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
+                                 consumer->getOperator()->getNbRequiredData(inId));
+        }
+        crLog += fmt::format("{}/{}", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
+                             consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
     }
-    crLog += fmt::format("{}/{}", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
-            consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
     Log::debug("{}", crLog);
 
     std::string pLog = "\t\tP:\t";
-    for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
-        pLog += fmt::format("{}\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+    if (consumer->nbOutputs() > 0) {
+        for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
+            pLog += fmt::format("{}\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
+        }
+        pLog += fmt::format("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
     }
-    pLog += fmt::format("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
     Log::debug("{}", pLog);
 }
 
@@ -650,23 +676,27 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::Scheduler::getConsumers(
     return consumers;
 }
 
-Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx) const {
+Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>& node, const IOIndex_t inputIdx, AvailableDataStatus& status) const {
     const auto parent = node->inputs()[inputIdx];
 
     if (parent.first) {
         // Parent is connected, everything if fine!
+        status = AvailableDataStatus::Connected;
         return parent.first->getOperator()->getNbProducedData(parent.second);
     }
     else if (std::shared_ptr<Node> upperNode = mUpperNode.lock()) {
-        // We are inside an upper operator (for instance a MetaOperator)
-        // We need to connect the "local" producer-consumer model to the upper
-        // one, by mapping local node inputs to the upper node inputs.
+        // We are inside an upper operator (for instance a MetaOperator).
+        // Check if the node input is also an upper node input...
         IOIndex_t upperInputIdx = 0;
         for (const auto& input : mGraphView->getOrderedInputs()) {
             if (input.first == node && input.second == inputIdx) {
-                // Current node is an input
+                // Current node is an input!
+                // We need to connect the "local" producer-consumer model to the upper
+                // one, by mapping local node inputs to the upper node inputs.
+                status = AvailableDataStatus::UpperNodeInputFound;
                 const auto upperInput = upperNode->inputs()[upperInputIdx];
                 if (upperInput.first) {
+                    status = AvailableDataStatus::UpperNodeInputConnected;
                     return upperInput.first->getOperator()->getNbProducedData(upperInput.second);
                 }
             }
@@ -678,6 +708,7 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>&
     // - There is no data, it is assumed to be an optional input
     // - A valid tensor exists:
     if (node->getOperator()->getRawInput(inputIdx)) {
+        status = AvailableDataStatus::ValidTensor;
         // => This means data was fed manually to the input, without a Producer
         // In this case, we assume a single-use data (unlike a Producer, which
         // keep producing the data each time it is needed).
@@ -685,6 +716,7 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>&
         return Elts_t::DataElts(std::static_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputIdx))->size());
     }
 
+    status = AvailableDataStatus::NotConnected;
     return Elts_t::NoneElts();
 }
 
@@ -740,7 +772,9 @@ Aidge::Scheduler::getPriorProducersConsumers(const std::shared_ptr<Node>& node)
     }
 
     prior.isPrior = true;
-    if (prior.priorConsumers.empty()) {
+    if (node->type() == Producer_Op::Type) {
+        prior.requiredProducers.insert(node);
+    } else if (prior.priorConsumers.empty()) {
         prior.priorConsumers.insert(node);
     }
     mPriorCache.insert(std::make_pair(node, prior));
diff --git a/src/utils/DynamicAttributes.cpp b/src/utils/DynamicAttributes.cpp
index 909d3bb2f5fda977ac497a19e1a1088eb52cfc88..6e64861605b3c853da0d754065e24ed43aba0f63 100644
--- a/src/utils/DynamicAttributes.cpp
+++ b/src/utils/DynamicAttributes.cpp
@@ -11,18 +11,40 @@
 
 #include "aidge/utils/DynamicAttributes.hpp"
 
-std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> Aidge::DynamicAttributes::mAnyCompare;
+std::map<std::type_index, std::unique_ptr<Aidge::DynamicAttributes::AnyUtils_>> Aidge::DynamicAttributes::mAnyUtils = []() {
+    std::map<std::type_index, std::unique_ptr<Aidge::DynamicAttributes::AnyUtils_>> m;
+    m.emplace(typeid(DynamicAttributes), std::unique_ptr<AnyUtils<DynamicAttributes>>(new AnyUtils<DynamicAttributes>()));
+    return m;
+}();
+
+template<> void Aidge::DynamicAttributes::setAttr<future_std::any>(const std::string& name, const future_std::any& value)
+{
+    const auto dot = name.find('.');
+    if (dot == name.npos) {
+        AIDGE_ASSERT(mAnyUtils.find(value.type()) != mAnyUtils.end(), "DynamicAttributes::setAttr(): cannot set value to std::any of a never-seen type.");
+
+        auto res = mAttrs.emplace(std::make_pair(name, value));
+        if (!res.second)
+            res.first->second = value;
+    }
+    else {
+        const auto ns = name.substr(0, dot);
+        const auto nsName = name.substr(dot + 1);
+        auto res = mAttrs.emplace(std::make_pair(ns, future_std::any(DynamicAttributes())));
+        future_std::any_cast<DynamicAttributes&>(res.first->second).setAttr<future_std::any>(nsName, value);
+    }
+}
 
 bool future_std::operator<(const future_std::any& lhs, const future_std::any& rhs) {
     if (lhs.type() == rhs.type()) {
-        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+        return Aidge::DynamicAttributes::mAnyUtils.at(lhs.type())->compare(lhs, rhs);
     }
 #ifdef PYBIND
     else if (lhs.type() == typeid(py::object)) {
-        return Aidge::DynamicAttributes::mAnyCompare.at(rhs.type())(lhs, rhs);
+        return Aidge::DynamicAttributes::mAnyUtils.at(rhs.type())->compare(lhs, rhs);
     }
     else if (rhs.type() == typeid(py::object)) {
-        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+        return Aidge::DynamicAttributes::mAnyUtils.at(lhs.type())->compare(lhs, rhs);
     }
 #endif
     else {
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index da32a8e0ec6a3c9f27da5c47f9e6166c1fc879bc..136fcc16fe5f83f684f0686fe64105023f8cc688 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -56,13 +56,13 @@ std::string Aidge::Log::mFileName = []() {
     }
     return std::string();
 }();
-std::unique_ptr<FILE, decltype(&std::fclose)> Aidge::Log::mFile {nullptr, nullptr};
+std::unique_ptr<FILE, Aidge::Log::fcloseDeleter> Aidge::Log::mFile {nullptr, Aidge::Log::fcloseDeleter()};
 std::vector<std::string> Aidge::Log::mContext;
 
 void Aidge::Log::log(Level level, const std::string& msg) {
     if (level >= mConsoleLevel) {
         // Apply log level style only for console.
-        // Styles that were already applied to msg with fmt are kept also in 
+        // Styles that were already applied to msg with fmt are kept also in
         // the log file.
         const auto modifier
             = !mConsoleColor ? fmt::text_style()
@@ -94,14 +94,17 @@ void Aidge::Log::log(Level level, const std::string& msg) {
 }
 
 void Aidge::Log::initFile(const std::string& fileName) {
-    mFile = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(fileName.c_str(), "a"), &std::fclose);
+    FILE* rawFile = std::fopen(fileName.c_str(), "a");
 
-    if (!mFile) {
+    if (!rawFile) {
         mFileName.clear(); // prevents AIDGE_THROW_OR_ABORT() to try to log into file
         AIDGE_THROW_OR_ABORT(std::runtime_error,
             "Could not create log file: {}", fileName);
     }
 
+    // Assign the new FILE* with the deleter
+    mFile = std::unique_ptr<FILE, fcloseDeleter>(rawFile, fcloseDeleter());
+
     const std::time_t t = std::time(nullptr);
     fmt::println(mFile.get(), "###### {:%Y-%m-%d %H:%M:%S} ######", fmt::localtime(t));
 }
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index fd96b060630c162e93143e8f51019a0ce3e82cc9..2c0c746a412643c568ff07d8c50b5f5d8cbf72d5 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -1,12 +1,17 @@
-Include(FetchContent)
+find_package(Catch2 3.7.1)
 
-FetchContent_Declare(
-  Catch2
-  GIT_REPOSITORY https://github.com/catchorg/Catch2.git
-  GIT_TAG        v3.0.1 # or a later release
-)
+if(NOT Catch2_FOUND)
+    message(STATUS "Catch2 not found in system, retrieving from git")
+    Include(FetchContent)
 
-FetchContent_MakeAvailable(Catch2)
+    FetchContent_Declare(
+      Catch2
+      GIT_REPOSITORY https://github.com/catchorg/Catch2.git
+      GIT_TAG        v3.7.1 # or a later release
+    )
+
+    FetchContent_MakeAvailable(Catch2)
+endif()
 
 file(GLOB_RECURSE src_files "*.cpp")
 
diff --git a/unit_tests/backend/Test_TensorImpl.cpp b/unit_tests/backend/Test_TensorImpl.cpp
index 43e25092a0f502698bbff7b0142969154f2cb0b0..2f6ef519935295dce5edd0d486c9f5ba6e307331 100644
--- a/unit_tests/backend/Test_TensorImpl.cpp
+++ b/unit_tests/backend/Test_TensorImpl.cpp
@@ -34,7 +34,7 @@ TEST_CASE("[backend/cpu/data] Tensor", "[TensorImpl]") {
 }
 
 TEST_CASE("Tensor fill", "[TensorImpl][fill]") {
-  SECTION("Instantiate batches independantly") {
+  SECTION("Instantiate batches independently") {
     // initialization with 0s
     std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
     //concatenatedTensor->print();
@@ -47,6 +47,7 @@ TEST_CASE("Tensor fill", "[TensorImpl][fill]") {
     concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
     concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
     concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
+    REQUIRE_THROWS(concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 11));
     // concatenatedTensor->print();
 
     std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{
diff --git a/unit_tests/data/Test_Interpolation.cpp b/unit_tests/data/Test_Interpolation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..73a48125a7f6e300cebe483dfc1cf025fdfc7707
--- /dev/null
+++ b/unit_tests/data/Test_Interpolation.cpp
@@ -0,0 +1,76 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstdlib>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Interpolation.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+TEST_CASE("[core/data] Interpolation", "[Interpolation][Data]") {
+    Log::setConsoleLevel(Log::Debug);
+
+    auto tensor = std::make_shared<Tensor>(std::vector<DimSize_t>({10, 10}));
+    tensor->setDataType(DataType::Float32);
+    tensor->setBackend("cpu");
+    Aidge::constantFiller(tensor, 1337.F);
+
+    SECTION("retrieveNeighbours") {
+        std::set<Interpolation::Point<float>> neighbours;
+        std::set<Interpolation::Point<float>> expectedResult;
+
+        std::vector<float> coords;
+        SECTION("Out of bounds") {
+            coords = {-0.5, -0.5};
+            expectedResult = {{{-1, -1}, 0.f},
+                              {{0, -1}, 0.F},
+                              {{-1, 0}, 0.F},
+                              {{0, 0}, 1337.F}};
+            neighbours = Interpolation::retrieveNeighbours<float>(
+                reinterpret_cast<float *>(tensor->getImpl()->rawPtr()),
+                tensor->dims(),
+                coords,
+                PadBorderType::Zero);
+
+            CHECK(neighbours == expectedResult);
+        }
+        SECTION("Some coords are rounds hence duplicates are filtered out") {
+            tensor = std::make_shared<Tensor>(
+                std::vector<DimSize_t>({5, 10, 10, 10}));
+            tensor->setDataType(DataType::Float32);
+            tensor->setBackend("cpu");
+            Aidge::constantFiller(tensor, 1337.F);
+
+            expectedResult = {{{0, 0, -1, -1}, 0.F},
+                              {{0, 0, 0, -1}, 0.F},
+                              {{0, 0, -1, 0}, 0.F},
+                              {{0, 0, 0, 0}, 1337.F}};
+
+            neighbours = Interpolation::retrieveNeighbours(
+                reinterpret_cast<float *>(tensor->getImpl()->rawPtr()),
+                tensor->dims(),
+                std::vector<float>({0, 0, -0.25, -0.25}));
+            CHECK(expectedResult == neighbours);
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index d5cd8cdcfb88beee9aab2393b0c5591c79a70b80..6c4b14602aed98ff5736d2cf30ba642f9e7ec57b 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -9,9 +9,9 @@
  *
  ********************************************************************************/
 
-#include <array>
 #include <cstddef>     // std::size_t
 #include <cstdint>     // std::uint8_t, std::uint16_t, std::int32_t
+#include <cstdlib>
 #include <numeric>     // std::accumulate, std::inner_product
 #include <functional>  // std::multiplies
 #include <random>      // std::mt19937,
@@ -42,7 +42,8 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T_default.strides() == std::vector<DimSize_t>({1})) &&
             (T_default.getImpl() == nullptr) &&
             (T_default.grad() != nullptr) &&
-            (T_default.isContiguous() == true)
+            (T_default.isContiguous() == true) &&
+            (T_default.capacity() == 0)
         ));
     }
     SECTION("scalar constructor") {
@@ -119,7 +120,27 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
         ));
     }
     SECTION("copy constructor / copy assignment operator") {
+        Tensor t1 = Array1D<int, 2>{{1, 2}};
+        Tensor t2, t3;
 
+        REQUIRE_NOTHROW(t3 = t1);
+        REQUIRE(t1 == t3);
+
+        REQUIRE_NOTHROW(t2 = Tensor(t1));
+        REQUIRE(t1 == t2);
+
+
+        t1.set<int>(0, 10);
+
+        // check copies are shallow
+        REQUIRE(t2.get<int>(0) == 10);
+        REQUIRE(t3.get<int>(0) == 10);
+
+        // set already existing Tensor
+        Tensor t4 = Array1D<int, 1>{{11}};
+        REQUIRE_NOTHROW(t4 = t1);
+        REQUIRE(t4 == t1);
+        REQUIRE(t4.size() == 2);
     }
     SECTION("move constructor / move assignment operator") {
 
@@ -339,32 +360,63 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
                 }
 
                 // Test get() and set() by coords
-                // We create coords of rank 0 to the number of dimensions
-                for (std::size_t coord_size = 0; coord_size < dims.size(); ++coord_size) {
-                    std::vector<std::size_t> coords(coord_size);
-                    for (std::size_t coord_idx = 0; coord_idx < coord_size; ++coord_idx) {
-                        std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx];
-                        coords[coord_idx] = dim_idx;
-                    }
-                    std::size_t flat_idx, flat_storage_idx;
-                    // As it is continuous we have getIdx() == getStorageIdx()
-                    REQUIRE_NOTHROW(flat_idx = x.getIdx(coords));
-                    REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords));
-                    REQUIRE(flat_storage_idx == flat_idx);
-                    float val, val_flat;
-                    // Test get() by index and by coords
-                    REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx));
-                    REQUIRE_NOTHROW(val = x.get<float>(coords));
-                    REQUIRE(val == val_flat);
-                    REQUIRE(val == values[flat_idx]);
-                    // Test set() by coords, also update the reference array
-                    REQUIRE_NOTHROW(x.set(coords, val + 1));
-                    values[flat_idx] += 1;
+                // We create coords of the number of dimensions
+                std::vector<std::size_t> coords(nb_dims);
+                for (std::size_t coord_idx = 0; coord_idx < nb_dims; ++coord_idx) {
+                    std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx];
+                    coords[coord_idx] = dim_idx;
                 }
+                std::size_t flat_idx, flat_storage_idx;
+                // As it is continuous we have getIdx() == getStorageIdx()
+                REQUIRE_NOTHROW(flat_idx = x.getIdx(coords));
+                REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords));
+                REQUIRE(flat_storage_idx == flat_idx);
+                float val, val_flat;
+                // Test get() by index and by coords
+                REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx));
+                REQUIRE_NOTHROW(val = x.get<float>(coords));
+                REQUIRE(val == val_flat);
+                REQUIRE(val == values[flat_idx]);
+                // Test set() by coords, also update the reference array
+                REQUIRE_NOTHROW(x.set(coords, val + 1));
+                values[flat_idx] += 1;
             }
         }
     }
-
+    SECTION("Index & coord manipulation"){
+            Tensor tensor;
+            std::vector<DimSize_t> dims {2,2};
+            int nbVal = std::accumulate(dims.begin(),
+                                        dims.end(),
+                                        1,
+                                        std::multiplies<DimSize_t>());
+            float* values = static_cast<float*>(malloc(nbVal * sizeof(float)));
+            values[0] = 0;
+            values[1] = 1;
+            values[2] = 2;
+            values[3] = 3;
+            tensor.setDataType(DataType::Float32);
+            tensor.setBackend("cpu");
+            tensor.resize(dims);
+            tensor.getImpl()->setRawPtr(values, 4);
+            std::vector<std::size_t> coords;
+        SECTION("getIdx"){
+            CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,1}) ) == 3);
+            CHECK(Tensor::toIndex(tensor.dims(), std::vector<std::size_t>({1,0}) ) == 2);
+            // toIndex() performs no bounds check; getIdx() throws on out-of-bounds coords
+            CHECK_THROWS(tensor.getIdx(std::vector<std::size_t>({0,2})));
+        }
+        SECTION("getCoord"){
+            CHECK(Tensor::toCoord(tensor.dims(),  3 ) ==std::vector<std::size_t>({1,1}));
+            CHECK(Tensor::toCoord(tensor.dims(),  2 ) ==std::vector<std::size_t>({1,0}));
+        }
+        SECTION("isInBounds"){
+            CHECK_THROWS(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,2,4,5})));
+            CHECK(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,2})) == false);
+            CHECK(Tensor::isInBounds(dims, std::vector<int>({-1,1})) == false);
+            CHECK(Tensor::isInBounds(dims, std::vector<DimSize_t>({1,1})) == true);
+        }
+    }
     SECTION("Tensor extract") {
         bool equal;
 
@@ -457,7 +509,7 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
     SECTION("Pretty printing for debug") {
         Tensor x{};
         // Empty Tensor
-        REQUIRE_THROWS(x.print());
+        REQUIRE_NOTHROW(x.print());
         // scalar
         x = Tensor(42);
         REQUIRE_NOTHROW(x.print());
diff --git a/unit_tests/graph/Test_Connector.cpp b/unit_tests/graph/Test_Connector.cpp
index 79acce9281039f9f3c67b7235d8999b6c7173685..3fe2318c81a95528e46900e82fa7e4b69f5e28f5 100644
--- a/unit_tests/graph/Test_Connector.cpp
+++ b/unit_tests/graph/Test_Connector.cpp
@@ -184,7 +184,7 @@ TEST_CASE("Connector Mini-graph", "[Connector]") {
     // g->save("TestGraph");
 }
 
-TEST_CASE("Structural descrition - Sequential", "[GraphView]") {
+TEST_CASE("Structural description - Sequential", "[GraphView]") {
     // SECTION("Empty Sequence") {
     //     std::shared_ptr<GraphView> g1 = Sequential(); // Not supported
     //     REQUIRE(g1->getNodes() == std::set<std::shared_ptr<Node>>());
@@ -256,7 +256,7 @@ TEST_CASE("Structural description - Parallel", "[GraphView]") {
     }
 }
 
-TEST_CASE("Strucutral Description - Complex Graph", "[GraphView]") {
+TEST_CASE("Structural Description - Complex Graph", "[GraphView]") {
     std::shared_ptr<Node> firstLayer = GenericOperator("first", 1, 0, 1);
     auto g = Sequential({firstLayer,
                     GenericOperator("l2", 1, 0, 1),
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index a08808ee5e6c2657a76213dcff80cec53b23e7ee..5bd435e28718d663519e504995fd5b030913d254 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -195,7 +195,7 @@ TEST_CASE("remove", "[GraphView][remove]") {
             // g2->save("./remove2");
 
             REQUIRE(nodePtrTo(g1->getNodes(), nodePtrToName) == nodePtrTo(g2->getNodes(), nodePtrToName));
-            // Order not garanteed, because when a node is removed, it can create new GraphView inputs/outputs
+            // Order not guaranteed, because when a node is removed, it can create new GraphView inputs/outputs
             // Their order thus depends on the deletion order!
             //REQUIRE(nodePtrTo(g1->getOrderedInputs(), nodePtrToName) == nodePtrTo(g2->getOrderedInputs(), nodePtrToName));
             //REQUIRE(nodePtrTo(g1->getOrderedOutputs(), nodePtrToName) == nodePtrTo(g2->getOrderedOutputs(), nodePtrToName));
@@ -248,7 +248,7 @@ TEST_CASE("[core/graph] GraphView(add)", "[GraphView][add]") {
 
     SECTION("Several Nodes") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
-        // should automaticaly add parents for learnable parameters
+        // should automatically add parents for learnable parameters
         std::shared_ptr<Node> GOp1 = GenericOperator("Fictive", 0, 1, 1, "Gop1");
         std::shared_ptr<Node> GOp1parent = GenericOperator("Fictive", 0, 0, 1, "Gop1parent");
         GOp1parent->addChild(GOp1, 0, 0);
@@ -257,7 +257,7 @@ TEST_CASE("[core/graph] GraphView(add)", "[GraphView][add]") {
         REQUIRE(nodePtrTo(g->getOrderedInputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({}));
         REQUIRE(nodePtrTo(g->getOrderedOutputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({{"Gop1", 0}}));
 
-        // there should be no deplicates
+        // there should be no duplicates
         g->add(GOp1);
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({GOp1, GOp1parent}));
         REQUIRE(nodePtrTo(g->getOrderedInputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({}));
@@ -396,7 +396,7 @@ TEST_CASE("[core/graph] GraphView(save)") {
 }
 
 TEST_CASE("[core/graph] GraphView(resetConnections)") {
-    SECTION("disconnect data iput") {
+    SECTION("disconnect data input") {
         std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
         std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 2, 1, "c1");
         std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
@@ -447,10 +447,10 @@ TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNode
     auto data1 = Producer({2}, "data1");
     auto data2 = Producer({2}, "data2");
     auto data3 = Producer({2}, "data3");
-    auto add1 = Add(2, "add1");
-    auto add2 = Add(2, "add2");
+    auto add1 = Add("add1");
+    auto add2 = Add("add2");
     auto split1 = Split(2, 0, {1, 1}, "split1");
-    auto add3 = Add(3, "add3");
+    auto add3 = Add("add3");
     auto g = std::make_shared<GraphView>("TestGraph");
     data1->addChild(add1);
     data2->addChild(add1);
@@ -508,9 +508,9 @@ TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNode
 TEST_CASE("[core/graph] GraphView(getOrderedNodes) cyclic", "[GraphView][getOrderedNodes]") {
     auto data1 = Producer({2}, "data1");
     auto data2 = Producer({2}, "data2");
-    auto add1 = Add(2, "add1");
+    auto add1 = Add("add1");
     auto mem1 = Memorize(1, "mem1");
-    auto add2 = Add(2, "add2");
+    auto add2 = Add("add2");
     auto g = std::make_shared<GraphView>("TestGraph");
     data1->addChild(add1);
     data2->addChild(add1);
@@ -816,7 +816,7 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     }
 }
 
-TEST_CASE("[GraphView] clone") {
+TEST_CASE("[GraphView] clone", "[GraphView][Core][Clone]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {3, 3}, "conv1");
     auto conv2 = Conv(32, 64, {3, 3}, "conv2");
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index 2fdcd611d378ceb6c3dbdc853920eecf92c31141..582c73565a4ef7bfc96e493e1e6029b1683676ab 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -50,11 +50,11 @@ TEST_CASE("[core/graph] Matching") {
         ReLU("relu2"),
         PaddedConv(8, 16, {3, 3}, "conv3", {1, 1}, {2, 2, 2, 2}),
         ReLU("relu3"),
-        PaddedConv(8, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}),
-        Add(2, "add"),
-        PaddedConv(8, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}),
+        PaddedConv(16, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}),
+        Add("add"),
+        PaddedConv(16, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}),
         ReLU("relu5"),
-        Add(2, "add2")
+        Add("add2")
     });
 
     g1->getNode("relu3")->addChild(g1->getNode("add"), 0, 1);
@@ -65,8 +65,8 @@ TEST_CASE("[core/graph] Matching") {
     expandMetaOps(g1);
     g1->save("Test_examples", true);
 
-    SECTION("Conv->(ReLU->Pad->Conv)*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU->Pad->Conv)*");
+    SECTION("Conv2D->(ReLU->Pad2D->Conv2D)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D->(ReLU->Pad2D->Conv2D)*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "relu1", "relu2"}},
@@ -77,24 +77,24 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv->ReLU;ReLU->Pad") {
-        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU;ReLU->Pad"));
+    SECTION("Conv2D->ReLU;ReLU->Pad2D") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv2D->ReLU;ReLU->Pad2D"));
     }
 
-    SECTION("Conv->ReLU#1;ReLU#2->Pad") {
-        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU#1;ReLU#2->Pad"));
+    SECTION("Conv2D->ReLU#1;ReLU#2->Pad2D") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv2D->ReLU#1;ReLU#2->Pad2D"));
     }
 
-    SECTION("Conv?->ReLU") {
-        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv?->ReLU"));
+    SECTION("Conv2D?->ReLU") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv2D?->ReLU"));
     }
 
     SECTION("(Add#<*~.)*") {
         REQUIRE_THROWS(SinglePassGraphMatching(g1).match("(Add#<*~.)*"));
     }
 
-    SECTION("Conv->(ReLU~>Pad->Conv)*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*");
+    SECTION("Conv2D->(ReLU~>Pad2D->Conv2D)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D->(ReLU~>Pad2D->Conv2D)*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
@@ -105,8 +105,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv->(ReLU~>Pad->Conv)* [disjoint]") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*", true);
+    SECTION("Conv2D->(ReLU~>Pad2D->Conv2D)* [disjoint]") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D->(ReLU~>Pad2D->Conv2D)*", true);
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
@@ -114,8 +114,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv~>(ReLU~>Pad->Conv)*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU~>Pad->Conv)*");
+    SECTION("Conv2D~>(ReLU~>Pad2D->Conv2D)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D~>(ReLU~>Pad2D->Conv2D)*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
@@ -126,8 +126,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer");
+    SECTION("Pad2D->Conv2D#->ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#->ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -135,8 +135,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer");
+    SECTION("Pad2D->Conv2D#~>ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#~>ReLU;Conv2D#<1-Producer;Conv2D#<2-Producer");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -145,8 +145,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}");
+    SECTION("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-Producer){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-Producer){2}");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -155,8 +155,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#->ReLU;(Conv#<*-Producer){2}") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-Producer){2}");
+    SECTION("Pad2D->Conv2D#->ReLU;(Conv2D#<*-Producer){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#->ReLU;(Conv2D#<*-Producer){2}");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -164,8 +164,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#~>ReLU;(Conv#<*-.){2}") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-.){2}");
+    SECTION("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-.){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#~>ReLU;(Conv2D#<*-.){2}");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -174,8 +174,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Pad->Conv#->ReLU;(Conv#<*-.){2}") {
-        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-.){2}");
+    SECTION("Pad2D->Conv2D#->ReLU;(Conv2D#<*-.){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad2D->Conv2D#->ReLU;(Conv2D#<*-.){2}");
 
         checkMatches(results, {
             {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
@@ -184,7 +184,7 @@ TEST_CASE("[core/graph] Matching") {
     }
 
     SECTION("Conv#~>ReLU*;Conv#<-Pad*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU*;Conv#<-Pad*");
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU*;Conv2D#<-Pad2D*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -195,8 +195,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#->ReLU*;Conv#<-Pad*") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU*;Conv#<-Pad*");
+    SECTION("Conv2D#->ReLU*;Conv2D#<-Pad2D*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#->ReLU*;Conv2D#<-Pad2D*");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -207,8 +207,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?");
+    SECTION("Conv2D#->ReLU?-*>Add#1?->ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#->ReLU?-*>Add#1?->ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -219,8 +219,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?");
+    SECTION("Conv2D#~>ReLU?-*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU?-*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*-.)?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -231,8 +231,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?");
+    SECTION("Conv2D#~>ReLU?~*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*~.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU?~*>Add#1?~>ReLU?;Conv2D#<-Pad2D?;(Add#1<*~.)?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -243,8 +243,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#->ReLU?;Conv#<-Pad?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?;Conv#<-Pad?");
+    SECTION("Conv2D#->ReLU?;Conv2D#<-Pad2D?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#->ReLU?;Conv2D#<-Pad2D?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -255,8 +255,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#~>ReLU?;Conv#<-Pad?") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?;Conv#<-Pad?");
+    SECTION("Conv2D#~>ReLU?;Conv2D#<-Pad2D?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D#~>ReLU?;Conv2D#<-Pad2D?");
 
         checkMatches(results, {
             {"conv1", {"conv1", "relu1"}},
@@ -267,8 +267,8 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("(Conv|ReLU)->Add") {
-        const auto results = SinglePassGraphMatching(g1).match("(Conv|ReLU)->Add");
+    SECTION("(Conv2D|ReLU)->Add") {
+        const auto results = SinglePassGraphMatching(g1).match("(Conv2D|ReLU)->Add");
 
         checkMatches(results, {
             {"conv4_conv", {"add", "conv4_conv"}},
@@ -294,33 +294,33 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv~*>(ReLU&Add)") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv~*>(ReLU&Add)");
+    SECTION("Conv2D~*>(ReLU&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D~*>(ReLU&Add)");
 
         checkMatches(results, {
             {"conv5_conv", {"add2", "conv5_conv", "relu5"}}
         });
     }
 
-    SECTION("Conv~>(ReLU&Add)") {
-        const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU&Add)");
+    SECTION("Conv2D~>(ReLU&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv2D~>(ReLU&Add)");
         REQUIRE(results.size() == 0);
     }
 
-    SECTION("ReLU~*>((Pad->Conv-*>Add#)&Add#)") {
-        const auto results = SinglePassGraphMatching(g1).match("ReLU~*>((Pad->Conv-*>Add#)&Add#)");
+    SECTION("ReLU~*>((Pad2D->Conv2D-*>Add#)&Add#)") {
+        const auto results = SinglePassGraphMatching(g1).match("ReLU~*>((Pad2D->Conv2D-*>Add#)&Add#)");
 
         checkMatches(results, {
             {"relu3", {"add", "conv4_conv", "conv4_pad", "relu3"}}
         });
     }
 
-    SECTION("ReLU-*>((Pad->Conv-*>Add)&Add)") {
-        const auto results = SinglePassGraphMatching(g1).match("ReLU-*>((Pad->Conv-*>Add)&Add)");
+    SECTION("ReLU-*>((Pad2D->Conv2D-*>Add)&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("ReLU-*>((Pad2D->Conv2D-*>Add)&Add)");
         REQUIRE(results.size() == 0);
     }
 
-    SECTION("Pad->Conv[3x3]->ReLU") {
+    SECTION("Pad2D->Conv2D[3x3]->ReLU") {
         auto gm = SinglePassGraphMatching(g1);
         gm.addNodeLambda("3x3", [](const NodePtr& node) {
             const std::shared_ptr<Conv_Op<2>> op =
@@ -328,20 +328,20 @@ TEST_CASE("[core/graph] Matching") {
             return (op->kernelDims() == std::array<DimSize_t, 2>({3, 3}));
         });
 
-        const auto results = gm.match("Pad->Conv[3x3]->ReLU");
+        const auto results = gm.match("Pad2D->Conv2D[3x3]->ReLU");
 
         checkMatches(results, {
             {"conv3_pad", {"conv3_conv", "conv3_pad", "relu3"}}
         });
     }
 
-    SECTION(".[test]->Pad") {
+    SECTION(".[test]->Pad2D") {
         auto gm = SinglePassGraphMatching(g1);
         gm.addNodeLambda("test", [](const NodePtr& node) {
             return (node->type() == "Add" || (node->type() == "ReLU" && node->name() == "relu1"));
         });
 
-        const auto results = gm.match(".[test]->Pad");
+        const auto results = gm.match(".[test]->Pad2D");
 
         checkMatches(results, {
             {"add", {"add", "conv5_pad"}},
@@ -352,11 +352,11 @@ TEST_CASE("[core/graph] Matching") {
     auto g2 = Sequential({
         Producer({16, 3, 512, 512}, "dataProvider"),
         Conv(3, 4, {5, 5}, "conv1"),
-        BatchNorm<2>(4, 1.0e-5, 0.1, "bn1"),
+        BatchNorm<2>(4, 1.0e-5, 0.1, false, "bn1"),
         Conv(4, 4, {5, 5}, "conv2"),
         ReLU("relu2"),
         Conv(4, 4, {5, 5}, "conv3"),
-        BatchNorm<2>(4, 1.0e-5, 0.1, "bn3"),
+        BatchNorm<2>(4, 1.0e-5, 0.1, false, "bn3"),
         FC(4, 4, false, "fc1"),
         FC(4, 4, false, "fc2"),
         FC(4, 4, false, "fc3"),
@@ -364,16 +364,16 @@ TEST_CASE("[core/graph] Matching") {
         Conv(1, 4, {5, 5}, "conv4")
     });
 
-    SECTION("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))") {
+    SECTION("((Conv2D#->(.[exBN]|$))|(FC#->(.[exFC])*->$))") {
         auto gm = SinglePassGraphMatching(g2);
         gm.addNodeLambda("exBN", [](const NodePtr& node) {
-            return (node->type() != "BatchNorm");
+            return (node->type() != "BatchNorm2D");
         });
         gm.addNodeLambda("exFC", [](const NodePtr& node) {
             return (node->type() != "FC");
         });
 
-        const auto results = gm.match("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))");
+        const auto results = gm.match("((Conv2D#->(.[exBN]|$))|(FC#->(.[exFC])*->$))");
 
         checkMatches(results, {
             {"conv2", {"conv2", "relu2"}},
@@ -396,13 +396,13 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv#->(.[exConv])*->$") {
+    SECTION("Conv2D#->(.[exConv])*->$") {
         auto gm = SinglePassGraphMatching(g2);
         gm.addNodeLambda("exConv", [](const NodePtr& node) {
-            return (node->type() != "Conv");
+            return (node->type() != "Conv2D");
         });
 
-        const auto results = gm.match("Conv#->(.[exConv])*->$");
+        const auto results = gm.match("Conv2D#->(.[exConv])*->$");
 
         checkMatches(results, {
             {"conv4", {"conv4"}}
@@ -423,13 +423,13 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#") {
+    SECTION("(((FC#|Conv2D#)<-(.[exParam])*<-$)|((FC#|Conv2D#)->(.[exParam])*->$));(FC#|Conv2D#)<1-Producer#") {
         auto gm = SinglePassGraphMatching(g2);
         gm.addNodeLambda("exParam", [](const NodePtr& node) {
-            return (node->type() != "FC" && node->type() != "Conv");
+            return (node->type() != "FC" && node->type() != "Conv2D");
         });
 
-        const auto results = gm.match("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#");
+        const auto results = gm.match("(((FC#|Conv2D#)<-(.[exParam])*<-$)|((FC#|Conv2D#)->(.[exParam])*->$));(FC#|Conv2D#)<1-Producer#");
 
         checkMatches(results, {
             {"conv1", {"conv1", "conv1_w", "dataProvider"}},
@@ -437,13 +437,13 @@ TEST_CASE("[core/graph] Matching") {
         });
     }
 
-    SECTION("Conv->ReLU [perf]") {
+    SECTION("Conv2D->ReLU [perf]") {
         const size_t nbTests = 3;
         std::mt19937::result_type seed(1);
 
         for (int test = 0; test < nbTests; ++test) {
             RandomGraph randGraph;
-            randGraph.types = {"Conv", "ReLU", "Dummy"};
+            randGraph.types = {"Conv2D", "ReLU", "Dummy"};
             randGraph.typesWeights = {0.4, 0.4, 0.2};
             randGraph.avgIn = 1;
             randGraph.maxIn = 1;
@@ -460,7 +460,7 @@ TEST_CASE("[core/graph] Matching") {
             auto gm = SinglePassGraphMatching(g1);
 
             const auto start = std::chrono::system_clock::now();
-            const auto results = gm.match("Conv->ReLU#;ReLU#->Dummy");
+            const auto results = gm.match("Conv2D->ReLU#;ReLU#->Dummy");
             const auto end = std::chrono::system_clock::now();
             const auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
 
diff --git a/unit_tests/graph/Test_StaticAnalysis.cpp b/unit_tests/graph/Test_StaticAnalysis.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9488cbaf60fffcaee32a573993a46a0a440a4dea
--- /dev/null
+++ b/unit_tests/graph/Test_StaticAnalysis.cpp
@@ -0,0 +1,68 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <fmt/chrono.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/graph/StaticAnalysis.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
+#include "aidge/operator/Producer.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[core/graph] StaticAnalysis") {
+    SECTION("Conv") {
+        auto g1 = Sequential({
+            Conv(3, 4, {5, 5}, "conv1"),
+            ReLU("relu1"),
+            PaddedConv(4, 8, {5, 5}, "conv2", {1, 1}, {2, 2, 2, 2}),
+            ReLU("relu2"),
+            PaddedConv(8, 16, {3, 3}, "conv3", {1, 1}, {2, 2, 2, 2}),
+            ReLU("relu3"),
+            PaddedConv(16, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}),
+            Add("add"),
+            PaddedConv(16, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}),
+            ReLU("relu5"),
+            Add("add2")
+        });
+
+        g1->getNode("relu3")->addChild(g1->getNode("add"), 0, 1);
+        g1->getNode("conv5")->addChild(g1->getNode("add2"), 0, 1);
+        g1->updateInputsOutputs();
+
+        g1->forwardDims({{16, 3, 512, 512}});
+
+        StaticAnalysis stats(g1);
+        REQUIRE(stats.getNbParams(g1->getNode("conv1")) == 3 * 4 * 5 * 5 + 4);
+        REQUIRE(stats.getNbParams(g1->getNode("conv2")) == 4 * 8 * 5 * 5 + 8);
+        REQUIRE(stats.getNbParams(g1->getNode("conv3")) == 8 * 16 * 3 * 3 + 16);
+
+        const auto conv1Stats = stats.getOpStats(g1->getNode("conv1"));
+        REQUIRE(conv1Stats->getNbMACOps() == 1LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmOps() == 2LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmFpOps() == 2LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmIntOps() == 0);
+
+        g1->getNode("conv1")->getOperator()->setDataType(DataType::Int8);
+        REQUIRE(conv1Stats->getNbMACOps() == 1LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmOps() == 2LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+        REQUIRE(conv1Stats->getNbArithmFpOps() == 0);
+        REQUIRE(conv1Stats->getNbArithmIntOps() == 2LL * (16 * 508 * 508) * (5 * 5 * 3 * 4));
+    }
+}
diff --git a/unit_tests/graphRegex/Test_FsmMatch.cpp b/unit_tests/graphRegex/Test_FsmMatch.cpp
index 008251feaac9d2dbe21aae3dfc7ebaa69e828ae7..6229ec62af96802ebdfe871e6058ab1791cf80fd 100644
--- a/unit_tests/graphRegex/Test_FsmMatch.cpp
+++ b/unit_tests/graphRegex/Test_FsmMatch.cpp
@@ -52,7 +52,7 @@ TEST_CASE("FsmMatch") {
     }
 
 
-    SECTION("2 branche graph"){
+    SECTION("2 branches graph"){
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
         std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
diff --git a/unit_tests/graphRegex/Test_GraphLexer.cpp b/unit_tests/graphRegex/Test_GraphLexer.cpp
index 1b8cc8e018546ebfe3f84202d9404db27b17449b..615c052041e40c2941054b7ddcc19229c0994f0d 100644
--- a/unit_tests/graphRegex/Test_GraphLexer.cpp
+++ b/unit_tests/graphRegex/Test_GraphLexer.cpp
@@ -104,7 +104,7 @@ TEST_CASE("GraphRegex", "Lexer") {
 
 
             std::ostringstream errorMessage;
-            errorMessage << "\n we whant :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
+            errorMessage << "\n we want :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
 
             CAPTURE(errorMessage.str());
             REQUIRE(token->getLexeme() == lexemToFind);
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index 68ac509e79e347106a9a132249f125ebe6e39f6a..79e471d44a49dfb52fd5eb4aa1ed2dc4ab8dc0bb 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -153,9 +153,9 @@ TEST_CASE("GraphRegexUser") {
 
       // generate the original GraphView
         auto matmul0 = MatMul("matmul0");
-        auto add0 = Add(2, "add0");
+        auto add0 = Add("add0");
         auto matmul1 = MatMul("matmul1");
-        auto add1 = Add(2, "add1");
+        auto add1 = Add("add1");
 
         auto b0 = Producer({5}, "B0");
         auto w0 = Producer({5, 5}, "W0");
diff --git a/unit_tests/graphRegex/Test_examples.cpp b/unit_tests/graphRegex/Test_examples.cpp
index d85ae5c893a7ae4497125a62dad3cde97dac5195..0ccc05b5a957673167a16643b22bf047fc80f43f 100644
--- a/unit_tests/graphRegex/Test_examples.cpp
+++ b/unit_tests/graphRegex/Test_examples.cpp
@@ -40,7 +40,7 @@ TEST_CASE("Examples", "[GraphMatching]") {
 
     auto regex = std::make_shared<GraphRegex>();
     regex->setKeyFromGraph(g1);
-    regex->addQuery("Pad->Conv->ReLU");
+    regex->addQuery("Pad2D->Conv2D->ReLU");
     // Won't work, wrong number of matches:
     //regex->addQuery("Pad*->Conv->ReLU*");
 
@@ -52,4 +52,4 @@ TEST_CASE("Examples", "[GraphMatching]") {
     }
 }
 
-}  // namespace Aidge
\ No newline at end of file
+}  // namespace Aidge
diff --git a/unit_tests/graphRegex/Test_graphRegexAST.cpp b/unit_tests/graphRegex/Test_graphRegexAST.cpp
index 1cdb0bc1934983a26ab742bfe8879455077219cc..f9c7a7c5dc8ab103b3b566f2df77883b0b1966f1 100644
--- a/unit_tests/graphRegex/Test_graphRegexAST.cpp
+++ b/unit_tests/graphRegex/Test_graphRegexAST.cpp
@@ -55,7 +55,7 @@ TEST_CASE("GraphStrInterpreter") {
         for (const std::string& test : tests) {
             std::shared_ptr<GraphStrInterpreter>  strGenerator = std::make_shared<GraphStrInterpreter>(test);
             std::string astString = strGenerator->interpret();
-            //supress space in the test becase erase in the AST
+            //suppress space in the test because erase in the AST
             std::string testNoS = test;
             testNoS.erase(std::remove_if(testNoS.begin(), testNoS.end(), ::isspace), testNoS.end());
             //if the last char is ; (SEP) it will not in the AST and it's not a bug erase it
diff --git a/unit_tests/nodeTester/Test_ConditionalLexer.cpp b/unit_tests/nodeTester/Test_ConditionalLexer.cpp
index a937c27227dde4fa03ed7733df9e9552c3c1ac7b..d79824e2e53d0c9621fdc6847ebca00faba03af4 100644
--- a/unit_tests/nodeTester/Test_ConditionalLexer.cpp
+++ b/unit_tests/nodeTester/Test_ConditionalLexer.cpp
@@ -130,7 +130,7 @@ TEST_CASE("nodeTester", "Lexer") {
 
 
             std::ostringstream errorMessage;
-            errorMessage << "\n we whant :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
+            errorMessage << "\n we want :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
 
             CAPTURE(errorMessage.str());
             REQUIRE(token->getLexeme() == lexemToFind);
diff --git a/unit_tests/operator/Test_Clip_Op.cpp b/unit_tests/operator/Test_Clip_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..51d6d248067c89be9703fd4d48f02347f285330a
--- /dev/null
+++ b/unit_tests/operator/Test_Clip_Op.cpp
@@ -0,0 +1,110 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Clip.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Clip_Op(forwardDims)", "[Clip][forwardDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    auto rd = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Clip Operator
+    std::shared_ptr<Node> myClip = Clip();
+    auto op = std::static_pointer_cast<OperatorTensor>(myClip -> getOperator());
+
+    // Input tensor
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(Array2D<int,2,2> {
+            {
+                {1, 2},
+                {3, 4}
+            }
+        });
+    op -> associateInput(0,T0);
+    // Tensor representing Min value
+    std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(2.0);
+    op -> associateInput(1,Tmin);
+    
+    // Tensor representing Max value
+    std::shared_ptr<Tensor> Tmax = std::make_shared<Tensor>(4.0);
+    op -> associateInput(2,Tmax);
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::forwardDims()``
+     */
+    SECTION("Scalar") {
+        //We set every Input as a Scalar
+        T0->resize({});
+        Tmin->resize({});
+        Tmax->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims(true));
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Normal Input") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(Array2D<int,2,2> {
+        {
+            {1, 2},
+            {3, 4}
+        }
+        });
+        const std::size_t nb_dims = nbDimsDist(gen);
+        std::vector<std::size_t> dims(nb_dims);
+        for (std::size_t i = 0; i < nb_dims; ++i)
+        {
+            dims[i] = dimsDist(gen);
+        }
+        T0->resize(dims);
+        op->associateInput(0,T0);
+        REQUIRE_NOTHROW(op->forwardDims(true));
+        REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    
+    SECTION("Min and max attributes ")
+    {
+        std::shared_ptr<Node> clip_attr = Clip("",-1.0,2.0);
+        auto opc = std::static_pointer_cast<OperatorTensor>(clip_attr -> getOperator());
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        opc -> associateInput(0,T0);
+        std::shared_ptr<Tensor> Tmin = std::make_shared<Tensor>(7);
+        opc-> associateInput(1,Tmin);
+        std::shared_ptr<Tensor> Tmax = std::make_shared<Tensor>(4);
+        opc -> associateInput(2,Tmax);
+
+        REQUIRE_NOTHROW(opc->forwardDims(true));
+        REQUIRE((opc->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Min and max attributes (No Input for min and max)")
+    {
+        std::shared_ptr<Node> clip = Clip("",-1.0,2.0);
+        auto opcl = std::static_pointer_cast<OperatorTensor>(clip -> getOperator());
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        opcl -> associateInput(0,T0);
+        REQUIRE_NOTHROW(opcl->forwardDims());
+        REQUIRE((opcl->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 41bad69749fd82f892c6faa625739d0493396c73..c82c55f165278c985dabd771cd6481a4839ada2c 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -82,7 +82,7 @@ TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         Testop.addAttr<long>("LongAttr", 3);
 
-        // This line should raise a failled assert
+        // This line should raise a failed assert
         REQUIRE_THROWS(Testop.getAttr<int>("LongAttribute"));
     }
 }
diff --git a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
index 15c714b63c2b86e156b43cdaec390ddf60eb7353..29e6f0a56d34935ab5c061897a27b902e03db790 100644
--- a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
+++ b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
@@ -70,12 +70,14 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)",
           for (uint16_t i = 0; i < nb_dims; ++i) {
             dims[i] = dimsDist(gen) + 1;
           }
-          std::vector<DimSize_t> dims_out{dims[0], dims[1]};
+          std::vector<DimSize_t> dims_out(nb_dims, 1);
+          dims_out[0] = dims[0];
+          dims_out[1] = dims[1];
           input_T->resize(dims);
           op->setInput(0, input_T);
           REQUIRE_NOTHROW(op->forwardDims());
           REQUIRE(op->getOutput(0)->dims() == dims_out);
-          REQUIRE((op->getOutput(0)->dims().size()) == static_cast<size_t>(2));
+          REQUIRE((op->getOutput(0)->dims().size()) == nb_dims);
         }
       }
     }
diff --git a/unit_tests/operator/Test_Heaviside_Op.cpp b/unit_tests/operator/Test_Heaviside_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d53268d1213730dc6202193d26b0531e781518f7
--- /dev/null
+++ b/unit_tests/operator/Test_Heaviside_Op.cpp
@@ -0,0 +1,70 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <cstddef> // std::size_t
+#include <memory>
+#include <random>  // std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Heaviside.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] Heaviside_Op(forwardDims)",
+          "[Heaviside][forwardDims]") {
+
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    auto rd = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Heaviside Operator
+    std::shared_ptr<Node> myHeaviside = Heaviside(0.5);
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(myHeaviside->getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op->associateInput(0, T0);
+
+    SECTION("Scalar") {
+        // input 0
+        T0->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+
+    SECTION("+1-D Tensor") {
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index d1b4e2e31e8c57e2c3eebd42019ba9f42c4d39e0..6711e1524fe0595b4effd68397f8cb684df590a9 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -34,10 +34,10 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
 
         REQUIRE(microGraph->getNodes().size() == 2);
         REQUIRE(microGraph->inputNodes().size() == 2);  // 2 because Conv has inputs outside the meta-op (Producers for weight and bias)
-        REQUIRE(nodePtrTo(microGraph->getOrderedInputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Pad", 0}, {"Conv", 1}, {"Conv", 2}}));
-        REQUIRE(nodePtrTo(microGraph->getOrderedOutputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Conv", 0}}));
+        REQUIRE(nodePtrTo(microGraph->getOrderedInputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Pad2D", 0}, {"Conv2D", 1}, {"Conv2D", 2}}));
+        REQUIRE(nodePtrTo(microGraph->getOrderedOutputs()) == std::vector<std::pair<std::string, IOIndex_t>>({{"Conv2D", 0}}));
         REQUIRE(microGraph->outputNodes().size() == 1);
-        REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
+        REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv2D");
         REQUIRE(op->nbInputs() == 3);
         REQUIRE(op->inputCategory(0) == InputCategory::Data);
         REQUIRE(op->inputCategory(1) == InputCategory::Param);
diff --git a/unit_tests/operator/Test_Operator.cpp b/unit_tests/operator/Test_Operator.cpp
index a050bbc4021b0c70a0d8faf6478eb2bd13ebdb58..6bd12c51ef367ad1cf1859afc56af8a21a706237 100644
--- a/unit_tests/operator/Test_Operator.cpp
+++ b/unit_tests/operator/Test_Operator.cpp
@@ -26,7 +26,7 @@ namespace Aidge {
 // TEST_CASE("[core/operator] Operator(computeReceptiveField)", "[Operator][computeReceptiveFiled]") {
 //     auto dataProvider1 = Producer({16, 3, 224, 224}, "dataProvider1");
 //     auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider2");
-//     auto gen1 = Add(2);
+//     auto gen1 = Add();
 //     auto gen2 = ReLU();
 
 //     auto g = std::make_shared<GraphView>("TestGraph");
diff --git a/unit_tests/operator/Test_PopImpl.cpp b/unit_tests/operator/Test_PopImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d3c87ef7289e4516442885f7449060055c428c49
--- /dev/null
+++ b/unit_tests/operator/Test_PopImpl.cpp
@@ -0,0 +1,37 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pop.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Pop(forward)", "[Pop][CPU]") {
+    std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Array1D<int,3>{{4,5,6}});
+    std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Array1D<int,3>{{1,2,3}});
+    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
+
+    auto pop = Pop("pop");
+    std::shared_ptr<Pop_Op> op = std::static_pointer_cast<Pop_Op>(pop->getOperator());
+    op->associateInput(0, input);
+    op->setBackend("cpu");
+    op->setDataType(DataType::Int32);
+    op->forwardDims();
+
+    REQUIRE_NOTHROW(pop->forward());
+    REQUIRE(*op->getOutput(0) == *pop2);
+    REQUIRE_NOTHROW(pop->forward());
+    REQUIRE(*op->getOutput(0) == *pop1);
+}
diff --git a/unit_tests/operator/Test_Resize_Op.cpp b/unit_tests/operator/Test_Resize_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..111e8fb4f62040127f8b5da8125ba9d91c546f23
--- /dev/null
+++ b/unit_tests/operator/Test_Resize_Op.cpp
@@ -0,0 +1,180 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Resize.hpp"
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef> // std::size_t
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Log.hpp"
+
+namespace Aidge {
+
+
+TEST_CASE("[core/operator] Resize_Op(forwardDims)",
+          "[Resize][forwardDimsScales]") {
+  std::vector<Aidge::DimSize_t> input_dims;
+  std::vector<float> scales;
+  std::vector<std::size_t> sizes;
+  std::vector<Aidge::DimSize_t> expected_dims;
+
+  SECTION("Un-connected input leads to failure.") {
+    input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+    std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+    auto resize_node = Resize();
+    auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+    op->associateInput(0, input_data);
+
+    REQUIRE_THROWS(op->forwardDims(true));
+  }
+
+  SECTION("Connecting both Scales & Sizes leads to failure") {
+    input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+    std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+    scales = std::vector<float>({.5, 3.0f, 2.0f, 2.0f});
+    sizes = std::vector<std::size_t>({1, 3, 4, 4});
+    expected_dims = std::vector<Aidge::DimSize_t>({2, 3, 4, 4});
+
+    auto resize_node = Resize(scales, sizes);
+    auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+    op->associateInput(0, input_data);
+
+    REQUIRE_THROWS(op->forwardDims(true));
+  }
+
+  SECTION("Input Scales") {
+    SECTION("TEST 1") {
+      input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({1, 1, 2, 2});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 4, 4});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+
+    SECTION("TEST 2") {
+      input_dims = std::vector<Aidge::DimSize_t>({4, 4, 10, 10});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({1, 1, 2, 3});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({4, 4, 20, 30});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 3") {
+      input_dims = std::vector<Aidge::DimSize_t>({4, 2, 10, 10});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({1, 1, 0.5, 0.5});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({4, 2, 5, 5});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 4") {
+      input_dims = std::vector<Aidge::DimSize_t>({11, 11, 4, 4});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+
+      scales = std::vector<float>({1, 1, 0.3, 0.3});
+      sizes = std::vector<std::size_t>({});
+      expected_dims = std::vector<Aidge::DimSize_t>({11, 11, 1, 1});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+  }
+
+  SECTION("Input Sizes") {
+    SECTION("TEST 1") {
+      input_dims = std::vector<Aidge::DimSize_t>({1, 1, 2, 2});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({4, 5, 8, 8});
+      expected_dims = std::vector<Aidge::DimSize_t>({4, 5, 8, 8});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 2") {
+      input_dims = std::vector<Aidge::DimSize_t>({60, 60, 30, 30});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({1, 1, 75, 75});
+      expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 75, 75});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 3") {
+      input_dims = std::vector<Aidge::DimSize_t>({11, 11, 20, 20});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({19, 6, 8, 8});
+      expected_dims = std::vector<Aidge::DimSize_t>({19, 6, 8, 8});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+    SECTION("TEST 4") {
+      input_dims = std::vector<Aidge::DimSize_t>({43, 211, 22, 22});
+      std::shared_ptr<Tensor> input_data = std::make_shared<Tensor>(input_dims);
+
+      scales = std::vector<float>({});
+      sizes = std::vector<std::size_t>({1, 1, 10, 10});
+      expected_dims = std::vector<Aidge::DimSize_t>({1, 1, 10, 10});
+      auto resize_node = Resize(scales, sizes);
+      auto op = std::static_pointer_cast<Resize_Op>(resize_node->getOperator());
+      op->associateInput(0, input_data);
+
+      REQUIRE_NOTHROW(op->forwardDims(true));
+      REQUIRE(op->getOutput(0)->dims() == expected_dims);
+    }
+  }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Squeeze_Op.cpp b/unit_tests/operator/Test_Squeeze_Op.cpp
index 471a1dcd1e45384b2c65da75ddee9d3ec039dc34..660e970dd65bb7c4b9a52b0ccb62350ca355d243 100644
--- a/unit_tests/operator/Test_Squeeze_Op.cpp
+++ b/unit_tests/operator/Test_Squeeze_Op.cpp
@@ -156,7 +156,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
     }
     SECTION("axes is given via tensor") {
       SECTION("tensor is empty") {
-        // arguments here should be overriden by axes_T values
+        // arguments here should be overridden by axes_T values
         std::shared_ptr<Node> myUnsqueeze =
             Squeeze(std::vector<std::int8_t>({0, 4}));
         auto op = std::static_pointer_cast<OperatorTensor>(
@@ -177,7 +177,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
         CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({3, 4, 5}));
       }
       SECTION("tensor not empty") {
-        // arguments here should be overriden by axes_T values
+        // arguments here should be overridden by axes_T values
         std::shared_ptr<Node> myUnsqueeze =
             Squeeze(std::vector<std::int8_t>({3, 1}));
         auto op = std::static_pointer_cast<OperatorTensor>(
diff --git a/unit_tests/operator/Test_StackImpl.cpp b/unit_tests/operator/Test_StackImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ccdf5787d666f030b8856704eb0e4fb108089075
--- /dev/null
+++ b/unit_tests/operator/Test_StackImpl.cpp
@@ -0,0 +1,169 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <catch2/matchers/catch_matchers_string.hpp>
+#include <cstddef>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <stdexcept>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Stack.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+using Catch::Matchers::Equals;
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] Stack(forward)", "[Stack]") {
+    constexpr auto nbTrials = 10;
+    auto rd = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> nbDist(1, 100);
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(2, 5);
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
+    // std::uniform_int_distribution<std::size_t> tensorNbDimsDist(2U, 5U);
+
+    const std::size_t nbDimsTensor = nbDimsDist(gen);
+    std::vector<std::size_t> dimsIn(nbDimsTensor);
+    std::shared_ptr<Tensor> t1 = std::make_shared<Tensor>();
+
+    SECTION("Constructors") {
+        // Valid arguments
+        for (auto i = 0; i < nbTrials; ++i) {
+            auto maxElements = nbDist(gen);
+            REQUIRE_NOTHROW(StackOp(maxElements));
+
+            auto op1 = StackOp(maxElements);
+            REQUIRE(op1.maxElements() == maxElements);
+            REQUIRE(op1.forwardStep() == 0);
+
+            // Copy Constructor
+            auto op2 = op1;
+            REQUIRE(op2.maxElements() == maxElements);
+            REQUIRE(op2.forwardStep() == 0);
+        }
+    }
+
+    SECTION("forwardDims") {
+        for (auto i = 0; i < nbTrials; ++i) {
+            auto maxElements = nbDist(gen);
+            auto op = StackOp(maxElements);
+
+            const std::size_t nbDims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nbDims);
+            for (std::size_t i = 0; i < nbDims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            t1->resize(dims);
+
+            REQUIRE_THROWS_WITH(
+                op.forwardDims(),
+                Equals("Stack: input #0 should be associated with a Tensor"));
+            op.associateInput(0, t1);
+            REQUIRE_NOTHROW(op.forwardDims());
+            REQUIRE(op.getOutput(0)->dims()[0] == maxElements);
+        }
+    }
+
+    SECTION("forward") {
+
+        std::generate(dimsIn.begin(), dimsIn.end(), [&gen, &dimsDist]() {
+            return dimsDist(gen);
+        });
+        const std::size_t nbElems =
+            std::accumulate(dimsIn.cbegin(),
+                            dimsIn.cend(),
+                            1u,
+                            std::multiplies<std::size_t>());
+
+        std::uniform_int_distribution<std::size_t> numTensorsDist(2, 10);
+        const std::size_t numTensors = numTensorsDist(gen);
+
+        std::vector<std::shared_ptr<Tensor>> tensors(numTensors);
+        std::vector<float *> arrays(numTensors);
+
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            arrays[i] = new float[nbElems];
+            for (std::size_t j = 0; j < nbElems; ++j) {
+                arrays[i][j] = valueDist(gen);
+            }
+            tensors[i] = std::make_shared<Tensor>();
+            tensors[i]->resize(dimsIn);
+            tensors[i]->setBackend("cpu");
+            tensors[i]->setDataType(DataType::Float32);
+            tensors[i]->getImpl()->setRawPtr(arrays[i], nbElems);
+        }
+
+        auto myStack = Stack(numTensors);
+        myStack->getOperator()->setBackend("cpu");
+        myStack->getOperator()->setDataType(DataType::Float32);
+        auto op =
+            std::static_pointer_cast<OperatorTensor>(myStack->getOperator());
+
+        op->associateInput(0, tensors[0]);
+        op->forwardDims();
+        op->getOutput(0)->zeros();
+
+        // Perform forward passes for each tensor
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            if (i > 0) {
+                op->associateInput(0, tensors[i]);
+            }
+            op->forward();
+        }
+
+        auto output = op->getOutput(0);
+
+        std::vector<DimSize_t> expectedDims = dimsIn;
+        expectedDims.insert(expectedDims.begin(), numTensors);
+
+        REQUIRE(output->dims() == expectedDims);
+
+        // Compare output slices with input tensors
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            Tensor outputTensor = output->extract(
+                {static_cast<std::uint64_t>(static_cast<int>(i))});
+            Tensor inputTensor(DataType::Float32);
+            inputTensor.resize(dimsIn);
+            inputTensor.setBackend("cpu");
+            inputTensor.getImpl()->setRawPtr(arrays[i], nbElems);
+
+            REQUIRE(approxEq<float>(outputTensor, inputTensor));
+        }
+
+        // Attempt to exceed maxElements
+        std::shared_ptr<Tensor> extraTensor = std::make_shared<Tensor>();
+        extraTensor->resize(dimsIn);
+        extraTensor->setBackend("cpu");
+        extraTensor->setDataType(DataType::Float32);
+        float *extraArray = new float[nbElems];
+        for (std::size_t j = 0; j < nbElems; ++j) {
+            extraArray[j] = valueDist(gen);
+        }
+        extraTensor->getImpl()->setRawPtr(extraArray, nbElems);
+
+        REQUIRE_THROWS_AS((op->associateInput(0, extraTensor), op->forward()),
+                          std::runtime_error);
+
+        // Clean up
+        delete[] extraArray;
+        for (std::size_t i = 0; i < numTensors; ++i) {
+            delete[] arrays[i];
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
index 18f0d68d87ac1ee66ffb1f24c4c130f9b020d56e..02b338dd265ceb4d9e03bbf86398cc8db38045df 100644
--- a/unit_tests/operator/Test_TransposeImpl.cpp
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -128,6 +128,75 @@ TEST_CASE("[cpu/operator] Transpose(forward)") {
         op->setBackend("cpu");
         myTranspose->forward();
 
+        REQUIRE(*(op->getOutput(0)) == *output);
+    }
+    SECTION("Default permutation") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> {
+            {
+                {
+                    {
+                        {1, 2, 3, 4}
+                    },
+                    {
+                        {5, 6, 7, 8}
+                    },
+                    {
+                        {9, 10, 11, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 14, 15, 16}
+                    },
+                    {
+                        {17, 18, 19, 20}
+                    },
+                    {
+                        {21, 22, 23, 24}
+                    }
+                }
+            }
+        });
+
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,4,1,3,2> {
+            {
+                {
+                        {
+                            { 1, 13},
+                            { 5, 17},
+                            { 9, 21}
+                        }
+                    },
+                    {
+                        {
+                            { 2, 14},
+                            { 6, 18},
+                            {10, 22}
+                        }
+                    },
+                    {
+                        {
+                            { 3, 15},
+                            { 7, 19},
+                            {11, 23}
+                        }
+                    },
+                    {
+                        {
+                            { 4, 16},
+                            { 8, 20},
+                            {12, 24}
+                        }
+                    }
+            }
+        });
+        std::shared_ptr<Node> myTranspose = Transpose({});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myTranspose->forward();
+
         REQUIRE(*(op->getOutput(0)) == *output);
     }
 }
diff --git a/unit_tests/operator/Test_Unsqueeze_Op.cpp b/unit_tests/operator/Test_Unsqueeze_Op.cpp
index 79f5b89b1c08f409b214a9439431c2d2a51ddbd2..a436ab5a54e2f66fe87bfc28157d691cf6548dd4 100644
--- a/unit_tests/operator/Test_Unsqueeze_Op.cpp
+++ b/unit_tests/operator/Test_Unsqueeze_Op.cpp
@@ -177,7 +177,7 @@ TEST_CASE("[core/operator] Unsqueeze(forwardDims)",
       }
     }
     SECTION("axes is given via tensor") {
-        // arguments here should be overriden by axes_T values
+        // arguments here should be overridden by axes_T values
         std::shared_ptr<Node> myUnsqueeze =
             Unsqueeze(std::vector<std::int8_t>({0, 4}));
         auto op = std::static_pointer_cast<OperatorTensor>(
diff --git a/unit_tests/recipes/Test_ExplicitTranspose.cpp b/unit_tests/recipes/Test_ExplicitTranspose.cpp
index 0c0a46710d69606508a22e7b01dac708db9b8f34..bb89ba7952347a779e6979e7cf3c4f1bd68abf9b 100644
--- a/unit_tests/recipes/Test_ExplicitTranspose.cpp
+++ b/unit_tests/recipes/Test_ExplicitTranspose.cpp
@@ -41,11 +41,11 @@ TEST_CASE("[ExplicitTranspose] conv") {
     g1->forwardDims();
     explicitTranspose(g1);
 
-    // Check that Tranpose were inserted
+    // Check that Transpose nodes were inserted
     g1->save("explicitTranspose_after");
     REQUIRE(g1->getNodes().size() == 12);
 
-    // Check that Tranpose are removed
+    // Check that Transpose nodes are removed
     conv2->getOperator()->setDataFormat(DataFormat::NCHW);
     explicitTranspose(g1);
 
diff --git a/unit_tests/recipes/Test_FuseToMetaOps.cpp b/unit_tests/recipes/Test_FuseToMetaOps.cpp
index 9fceedf2feef0a3ed79b83a8494a1a2b49f77291..7bb3ae63add6568f7de08a996b27a495a644cf46 100644
--- a/unit_tests/recipes/Test_FuseToMetaOps.cpp
+++ b/unit_tests/recipes/Test_FuseToMetaOps.cpp
@@ -35,7 +35,7 @@ TEST_CASE("[cpu/recipes] FuseToMetaOps", "[FuseToMetaOps][recipes]") {
     g1->save("FuseToMetaOps_before");
 
     // FIXME: GraphRegex also matches the Conv Producers, which are not in the query!
-    const auto nbFused = fuseToMetaOps(g1, "Conv->ReLU", "ConvReLU");
+    const auto nbFused = fuseToMetaOps(g1, "Conv2D->ReLU", "ConvReLU");
     g1->save("FuseToMetaOps_after", true);
 
     REQUIRE(nbFused == 2);
diff --git a/unit_tests/recipes/Test_LabelGraph.cpp b/unit_tests/recipes/Test_LabelGraph.cpp
index 78f67d823a17454c1ecff40a2307556c990c4f53..82dae3c48c137b12b8c0816fadcf40a1251137d7 100644
--- a/unit_tests/recipes/Test_LabelGraph.cpp
+++ b/unit_tests/recipes/Test_LabelGraph.cpp
@@ -44,11 +44,11 @@ TEST_CASE("[LabelGraph] conv") {
 
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
-        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling2D");
     }
 }
 
@@ -73,11 +73,11 @@ TEST_CASE("[LabelGraph] deleted node") {
 
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
-        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling2D");
     }
 
     SECTION("Check dimensions") {
@@ -111,11 +111,11 @@ TEST_CASE("[LabelGraph] deleted nodes") {
 
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
-        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv1")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv1")->getOperator()->getRawOutput(0) == g2->getNode("conv2")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv2")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("conv2")->getOperator()->getRawOutput(0) == g2->getNode("conv3")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("conv3")->getOperator()->type() == "MaxPooling2D");
     }
 }
 
@@ -139,11 +139,11 @@ TEST_CASE("[LabelGraph] pooling") {
 
     SECTION("Check resulting nodes") {
         REQUIRE(g2->getNodes().size() == 3);
-        REQUIRE(g2->getNode("pool1")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("pool1")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("pool1")->getOperator()->getRawOutput(0) == g2->getNode("pool2")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("pool2")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("pool2")->getOperator()->type() == "MaxPooling2D");
         REQUIRE(g2->getNode("pool2")->getOperator()->getRawOutput(0) == g2->getNode("pool3")->getOperator()->getRawInput(0));
-        REQUIRE(g2->getNode("pool3")->getOperator()->type() == "MaxPooling");
+        REQUIRE(g2->getNode("pool3")->getOperator()->type() == "MaxPooling2D");
     }
 
     SECTION("Check dimensions") {
diff --git a/unit_tests/recipes/Test_MatMulToFC.cpp b/unit_tests/recipes/Test_MatMulToFC.cpp
index 2adf882ca69e0d5ca5f050d1b89cfb09d81b536b..28eae0be17297467a29eab4e868e074c336d4a12 100644
--- a/unit_tests/recipes/Test_MatMulToFC.cpp
+++ b/unit_tests/recipes/Test_MatMulToFC.cpp
@@ -27,9 +27,9 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") {
     SECTION("with Add") {
         // generate the original GraphView
         auto matmul0 = MatMul("matmul0");
-        auto add0 = Add(2, "add0");
+        auto add0 = Add("add0");
         auto matmul1 = MatMul("matmul1");
-        auto add1 = Add(2, "add1");
+        auto add1 = Add("add1");
 
         auto b0 = Producer({5}, "B0");
         auto w0 = Producer({5, 5}, "W0");
@@ -76,7 +76,7 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") {
         // generate the original GraphView
         auto matmul0 = MatMul("matmul0");
         auto matmul1 = MatMul("matmul1");
-        auto add1 = Add(2, "add1");
+        auto add1 = Add("add1");
 
         auto w0 = Producer({5, 5}, "W0");
         auto b1 = Producer({5}, "B1");
diff --git a/unit_tests/recipes/Test_removeConstantOfShape.cpp b/unit_tests/recipes/Test_removeConstantOfShape.cpp
index 247149a0fdb1087f14ac17d125659d677ccfb506..b912efc640fc901f694afeda256be91d51010419 100644
--- a/unit_tests/recipes/Test_removeConstantOfShape.cpp
+++ b/unit_tests/recipes/Test_removeConstantOfShape.cpp
@@ -32,8 +32,8 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/recipies] removeConstantOfShape",
-          "[ConstantOfShape][removeConstantOfShape][recipies]") {
+TEST_CASE("[cpu/recipes] removeConstantOfShape",
+          "[ConstantOfShape][removeConstantOfShape][recipes]") {
   auto input_T = std::make_shared<Tensor>(Array1D<int64_t, 4>({1, 1, 3, 3}));
 
   auto model = std::make_shared<GraphView>();
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index c3b4c08d98115c9f081bbbf8cb677114b66c545a..1b5e2783813da890b1e79744582f54bb5c932772 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -24,7 +24,7 @@
 
 namespace Aidge {
 
-TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
+TEST_CASE("[cpu/recipes] RemoveFlatten", "[RemoveFlatten][recipes]") {
   std::shared_ptr<Node> flatten =
       GenericOperator("Flatten", 1, 0, 1, "myFlatten");
   std::shared_ptr<Node> fc0 = FC(10, 10, false, "FC_1");
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index 3c3026ff09222f9623d886f9c4574bf23667cd9a..ec850d28109a2682bb762c89e814622de6eec3d8 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -26,6 +26,7 @@
 #include "aidge/graph/Testing.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Identity.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
 
 namespace Aidge {
@@ -134,4 +136,58 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") {
   fmt::print("nbUnicity = {}/{}\n", nbUnicity, nbTests);
 }
 
+TEST_CASE("someScheduling", "[Scheduler][someUseCases]") {
+
+  SECTION("Identity Graph") {
+    auto data1 = Producer({1}, "data1");
+    auto identity = Identity("id");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    data1->addChild(identity);
+    g->add({data1, identity});
+    auto scheduler = SequentialScheduler(g);
+    scheduler.generateScheduling();
+    const auto sch = scheduler.getStaticScheduling();
+    const auto nodes = g->getNodes();
+    REQUIRE(sch.size() == nodes.size());
+    REQUIRE(sch[0] == data1);
+    REQUIRE(sch[1] == identity);
+  }
+
+  SECTION("Producer Graph") {
+    auto data1 = Producer({1}, "data1");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    g->add({data1});
+    auto scheduler = SequentialScheduler(g);
+    scheduler.generateScheduling();
+    const auto sch = scheduler.getStaticScheduling();
+    const auto nodes = g->getNodes();
+    REQUIRE(sch.size() == nodes.size());
+    REQUIRE(sch[0] == data1);
+  }
+
+  SECTION("Generic producer Graph") {
+    auto gen1 = GenericOperator("Prod", 0, 0, 1, "gen1");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    g->add({gen1});
+    auto scheduler = SequentialScheduler(g);
+    scheduler.generateScheduling();
+    const auto sch = scheduler.getStaticScheduling();
+    const auto nodes = g->getNodes();
+    REQUIRE(sch.size() == nodes.size());
+    REQUIRE(sch[0] == gen1);
+  }
+
+  SECTION("No output Graph") {
+    auto dead1 = GenericOperator("Dead", 1, 0, 0, "dead");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    g->add({dead1});
+    auto scheduler = SequentialScheduler(g);
+    scheduler.generateScheduling();
+    const auto sch = scheduler.getStaticScheduling();
+    const auto nodes = g->getNodes();
+    REQUIRE(nodes.size() == 1);
+    REQUIRE(sch.size() == 0);
+  }
+}
+
 } // namespace Aidge
diff --git a/unit_tests/utils/Test_StaticAttributes.cpp b/unit_tests/utils/Test_StaticAttributes.cpp
index 36c2e0454b415e1cb25cc3581016530a372b9e65..b23f8683e0ae3a2de805770556fefdd66722460d 100644
--- a/unit_tests/utils/Test_StaticAttributes.cpp
+++ b/unit_tests/utils/Test_StaticAttributes.cpp
@@ -43,6 +43,6 @@ TEST_CASE("[core/attributes] StaticAttribute") {
             attr<TestAttr::d>({true, false, true}));
 
         REQUIRE(attrs.getAttr<int>("a") == 42);
-        REQUIRE_THROWS(attrs.getAttr<int>("inexistant"));
+        REQUIRE_THROWS(attrs.getAttr<int>("inexistent"));
     }
 }
diff --git a/version.txt b/version.txt
index 0d91a54c7d439e84e3dd17d3594f1b2b6737f430..1d0ba9ea182b0f7354f3daf12120744ec5e0c2f8 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.3.0
+0.4.0